text stringlengths 4 1.02M | meta dict |
|---|---|
import os
from datetime import timedelta
from flask import Flask, session, request, redirect, render_template, jsonify, send_file
from flask_assets import Environment, Bundle
from lib import tweet
# --- application setup ---------------------------------------------------
app = Flask(__name__)
# SECRET_KEY must be provided via the environment; KeyError here is deliberate
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
# keep non-ASCII characters readable in jsonify() output
app.config['JSON_AS_ASCII'] = False
assets = Environment(app)
assets.url = app.static_url_path
# compile scss/style.scss -> css/style.css with the pyscss filter
scss = Bundle('scss/style.scss', filters='pyscss', output='css/style.css')
assets.register('style_scss', scss)
# route paths shared by the handlers and the before_request guard
logout_page = '/logout'
login_page = '/top'
class TweetError(Exception):
    """Raised when a twitter API response carries an error payload or no tweets."""
class TokenError(Exception):
    """Raised when an OAuth request token cannot be found or reconstructed."""
@app.before_request
def before_request():
    """Gate every request behind token presence.

    Login/logout/top pages and static .css/.js assets pass through
    untouched; any other path requires a token in the session (see
    check_token()), otherwise the client is redirected to the top page.
    """
    exempt_paths = ('/login', logout_page, login_page)
    if request.path in exempt_paths:
        return None
    if request.path.endswith(('.css', '.js')):
        return None
    if check_token():
        return None
    return redirect(login_page)
@app.route('/login', methods=['GET'])
def login():
    """Start the OAuth flow: persist the session for 10 days and redirect
    the user to the provider's authorization URL.

    On tweet.RequestDenied the session is cleared and the error re-raised.
    """
    session.permanent = True
    app.permanent_session_lifetime = timedelta(days=10)
    try:
        authorize_url = tweet.get_redirect_url()
    except tweet.RequestDenied as err:
        app.logger.error(err)
        clean_session()
        raise
    return redirect(authorize_url)
@app.route(login_page, methods=['GET'])
def top():
    """Landing page; also the redirect target for unauthenticated users."""
    return render_template('top.html')
@app.route(logout_page, methods=['GET'])
def logout():
    """Drop every stored token and bounce back to the top page."""
    clean_session()
    return redirect(login_page)
@app.route('/', methods=['GET'])
def index():
    """Main page: resolve the OAuth tokens and render index.html.

    TokenError sends the user back to the top page to re-authenticate;
    tweet.RequestDenied goes through the logout page to clear the session.
    (The original code had a second, unreachable `except TokenError`
    after the first one — removed.)
    """
    try:
        request_token = get_request_token()
        access_token = get_access_token(request_token)
    except TokenError as detail:
        app.logger.error(detail)
        return redirect(login_page)
    except tweet.RequestDenied as detail:
        app.logger.error(detail)
        return redirect(logout_page)
    return render_template('index.html', screen_name=access_token['screen_name'])
@app.route('/_get_ipaddr')
def get_ipaddr():
    """Return the client's IP address as JSON, honouring X-Forwarded-For."""
    forwarded = request.headers.getlist("X-Forwarded-For")
    ip = forwarded[0] if forwarded else request.remote_addr
    app.logger.info("ipaddr: {}".format(ip))
    return jsonify({'ip': ip})
@app.route('/_get_tweets_head')
def _get_tweets_head():
    """Render the header fragment shown above the tweet list."""
    return render_template('tweets-head.html')
@app.route('/_get_tweet_template')
def _get_tweet_template():
    """Serve the raw client-side tweet template (not passed through Jinja)."""
    return send_file('templates/tweet.html')
@app.route('/_get_tweets_js', methods=['POST'])
def _get_tweets_js():
    """JSON endpoint: fetch tweets for the requested type/params.

    Token failures redirect to the top page; API/validation failures are
    reported back to the client as a JSON `{'error': ...}` object.
    """
    payload = request.json
    app.logger.debug("_get_tweets_js request: {}".format(payload))
    try:
        token = get_request_token()
        access_token = get_access_token(token)
        tweets = tweet.get_tweets(access_token, payload['twtype'], payload['params'])
    except TokenError as err:
        app.logger.error(err)
        return redirect(login_page)
    except tweet.RequestDenied as err:
        app.logger.error(err)
        return jsonify({'error': str(err)})
    try:
        check_tweets(tweets)
    except TweetError as err:
        app.logger.error(err)
        return jsonify({'error': str(err)})
    return jsonify(filter_data(payload['twtype'], tweets))
@app.route('/_get_oath2_tweets_js', methods=['POST'])
def _get_oath2_tweets():
    """JSON endpoint using app-auth (OAuth2) instead of a user token.

    API/validation failures are reported back to the client as a JSON
    `{'error': ...}` object. (A duplicated logger.error call was removed.)
    """
    app.logger.debug("_get_oath2_tweets request: {}".format(request.json))
    try:
        access_token = get_oath2_access_token()
        tweets = tweet.get_oath2_tweets(access_token, request.json['twtype'], request.json['params'])
    except tweet.RequestDenied as detail:
        app.logger.error(detail)
        return jsonify({'error': str(detail)})
    try:
        check_tweets(tweets)
    except TweetError as detail:
        app.logger.error(detail)
        return jsonify({'error': str(detail)})
    send_data = filter_data(request.json['twtype'], tweets)
    return jsonify(send_data)
@app.route('/_get_tweets', methods=['POST'])
def _get_tweets():
    """HTML endpoint: fetch tweets and render them server-side."""
    payload = request.json
    app.logger.debug("_get_tweets request: {}".format(payload))
    try:
        token = get_request_token()
        access_token = get_access_token(token)
        tweets = tweet.get_tweets(access_token, payload['twtype'], payload['params'])
    except TokenError as err:
        app.logger.error(err)
        return redirect(login_page)
    except tweet.RequestDenied as err:
        app.logger.error(err)
        return redirect(logout_page)
    try:
        check_tweets(tweets)
    except TweetError as err:
        app.logger.error(err)
        return render_template('error.html', message=err)
    return render_tweets(payload, tweets)
@app.route('/_post_tweets', methods=['POST'])
def _post_tweets():
    """POST endpoint; returns plain "success" or the TweetError args.

    NOTE(review): despite the name this calls tweet.get_tweets, exactly
    like _get_tweets — looks like copy-paste; confirm intended behavior.
    """
    app.logger.debug("_post_tweets request: {}".format(request.json))
    try:
        token = get_request_token()
        access_token = get_access_token(token)
        tweets = tweet.get_tweets(access_token, request.json['twtype'], request.json['params'])
    except TokenError as err:
        app.logger.error(err)
        return redirect(login_page)
    except tweet.RequestDenied as err:
        app.logger.error(err)
        return redirect(logout_page)
    try:
        check_tweets(tweets)
    except TweetError as err:
        app.logger.error(err)
        return err.args
    return "success"
def get_oath2_access_token():
    """Return the cached OAuth2 (app-auth) token, fetching and caching it
    in the session on first use.

    :raises tweet.RequestDenied: propagated from the fetch (the original
        wrapped it in a no-op ``except ...: raise`` — removed)
    """
    if 'oath2_access_token' in session:
        return session['oath2_access_token']
    access_token = tweet.get_oath2_access_token()
    session['oath2_access_token'] = access_token
    return access_token
def get_request_token():
    """Return the OAuth request token, caching it in the session.

    On a cache miss the token is rebuilt from the ``oauth_token`` /
    ``oauth_verifier`` callback query parameters.

    :raises TokenError: when either callback parameter is missing
    """
    if 'request_token' in session:
        return session['request_token']
    oauth_token = request.args.get('oauth_token')
    oauth_verifier = request.args.get('oauth_verifier')
    if oauth_token is None or oauth_verifier is None:
        raise TokenError('cannot get request token')
    request_token = {'oauth_token': oauth_token,
                     'oauth_verifier': oauth_verifier}
    session['request_token'] = request_token
    return request_token
def get_access_token(request_token):
    """Exchange the request token for an access token, caching it in the
    session and logging the resolved screen_name.

    :param request_token: dict with oauth_token / oauth_verifier
    :raises tweet.RequestDenied: propagated from the exchange (the original
        wrapped it in a no-op ``except ...: raise`` — removed)
    """
    if 'access_token' in session:
        return session['access_token']
    access_token = tweet.get_access_token(request_token)
    app.logger.info("screen_name: {}".format(access_token['screen_name']))
    session['access_token'] = access_token
    return access_token
def check_token():
    """Return True when the session holds (or can derive) an OAuth token."""
    if 'access_token' in session or 'request_token' in session:
        return True
    try:
        get_request_token()
    except TokenError:
        return False
    return True
def check_tweets(tweets):
    """Raise TweetError when the API response reports an error or is empty.

    :param tweets: decoded API response — a list of tweets or a dict
    :raises TweetError: on an 'error'/'errors' payload or an empty result
    """
    if isinstance(tweets, dict):
        # membership test directly on the dict; .keys() was redundant
        if 'error' in tweets:
            raise TweetError(tweets['error'])
        if 'errors' in tweets:
            raise TweetError(tweets['errors'][0]['message'])
    if len(tweets) == 0:
        raise TweetError("Tweet Not Found")
def render_tweets(req, tweets):
    """Render the fetched data with the template matching its tweet type.

    The original passed ``**locals()`` to render_template; the templates
    now receive the same two variables (req, tweets) explicitly.
    """
    twtype = req['twtype']
    if twtype == "lists":
        return render_template('lists.html', lists=tweets)
    if twtype == "friends":
        return render_template('lists.html', lists=tweets['users'])
    if twtype == "search" and 'statuses' in tweets:
        # search responses wrap the tweets in a 'statuses' key
        tweets = tweets['statuses']
    return render_template('tweets.html', req=req, tweets=tweets)
def filter_data(twtype, tweets):
    """Shape a raw API response for the JSON endpoints.

    lists/friends are reduced to id+name pairs, geosearch passes through
    unchanged, and everything else goes through filter_tweets().
    """
    if twtype == "lists":
        return filter_lists(tweets)
    if twtype == "friends":
        return filter_lists(tweets['users'])
    if twtype == "search":
        return filter_tweets(tweets)
    if twtype == "geosearch":
        return tweets
    return filter_tweets(tweets)
def filter_tweets(tweets):
    """Reduce raw tweets to the fields the client needs, keeping only
    tweets that carry media.

    since_id/max_id are taken from the last/first tweet of the ORIGINAL
    list (before media filtering), matching the API's newest-first order.
    Retweets are unwrapped so media and counts come from the original
    tweet, while 'id' keeps the retweet's own id.

    Robustness fix: an empty input now returns an empty result instead of
    raising IndexError.

    :param tweets: list of tweet dicts, or a dict with a 'statuses' key
    :return: dict with 'since_id', 'max_id' and a 'tweets' list
    """
    if isinstance(tweets, dict) and 'statuses' in tweets:
        tweets = tweets['statuses']
    if not tweets:
        return {'since_id': None, 'max_id': None, 'tweets': []}
    send_tweets = {'since_id': tweets[-1]['id'],
                   'max_id': tweets[0]['id'],
                   'tweets': []}
    for tw in tweets:
        if 'media' not in tw['entities']:
            continue
        tweet_id = str(tw['id'])
        if 'retweeted_status' in tw:
            tw = tw['retweeted_status']
        tweet_id_org = str(tw['id'])
        tmp_tw = {'id': tweet_id,
                  'id_org': tweet_id_org,
                  'media_url_https': [],
                  'user_id': tw['user']['id'],
                  'user_screen_name': tw['user']['screen_name'],
                  'user_name': tw['user']['name'],
                  'text': tw['text'],
                  'retweet_count': tw['retweet_count'],
                  'favorite_count': tw['favorite_count'],
                  'retweeted': tw['retweeted'],
                  'favorited': tw['favorited']}
        # extended_entities carries the full media list when present
        if 'extended_entities' in tw:
            medias = tw['extended_entities']['media']
        else:
            medias = tw['entities']['media']
        for media in medias:
            tmp_tw['media_url_https'].append(media['media_url_https'])
        send_tweets['tweets'].append(tmp_tw)
    return send_tweets
def filter_lists(tweets):
    """Reduce list/user objects to id+name pairs (ids as strings)."""
    return [{'id': str(item['id']), 'name': item['name']} for item in tweets]
def clean_session():
    """Drop every auth token we may have stored in the session."""
    session.pop('request_token', None)
    session.pop('access_token', None)
    session.pop('oath2_access_token', None)
if __name__ == "__main__":
    # local development entry point; production should run under a WSGI server
    app.debug = True
    app.run(threaded=True)
| {
"content_hash": "5a6ad8b2fa463d482f5eccaf166bbfe1",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 101,
"avg_line_length": 27.345945945945946,
"alnum_prop": 0.6045661197865191,
"repo_name": "suitai/MyTweetApp",
"id": "6b50138fc5e6463cbe6093a2b32cae7f18e3f064",
"size": "10166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2619"
},
{
"name": "HTML",
"bytes": "7870"
},
{
"name": "JavaScript",
"bytes": "11636"
},
{
"name": "Python",
"bytes": "26167"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import os
import sys
import random
import subprocess
from redis import Redis
import time
sys.path.append(os.path.realpath(".."))
import helpers.utils as hlp
from models.feed_forward import FFDiscrete
class TRPODiscreteTrainer(FFDiscrete):
    """Trust Region Policy Optimization trainer for discrete-action policies.

    Rollouts are produced by (optionally distributed) workers that exchange
    weights and sampled paths through a local redis server on port 12000.
    `train()` runs the TRPO update loop; `make_rollout()` is the per-worker
    sampling routine.

    NOTE(review): the indentation of this block was reconstructed; the
    nesting of the normalization bootstrap in train() (the stds/means
    computation after the timestep==0 measurement) should be confirmed
    against the upstream repository.
    """

    def __init__(self, sess, args):
        FFDiscrete.__init__(self, sess, args)
        self.sess = sess
        self.config = args['config']
        self.env = args['environment']
        self.timesteps_per_launch = args['max_pathlength']
        self.n_workers = args['n_workers']
        self.distributed = args['distributed']
        self.timesteps_per_batch = args['timesteps_batch']
        self.n_tests = args['n_tests']
        self.max_kl = args['max_kl']
        self.normalize = args['normalize']
        self.scale = args['scale']
        self.gamma = args['gamma']
        self.value_updates = args['value_updates']
        self.save_every = args.get('save_every', 1)
        # running feature statistics used for observation normalization
        self.sums = self.sumsqrs = self.sumtime = 0
        self.timestep = 0
        self.create_internal()
        self.init_weights()
        self.train_scores = []
        self.test_scores = []
        np.set_printoptions(precision=6)
        # Worker parameters:
        self.id_worker = args['id_worker']
        self.test_mode = args['test_mode']

    def create_internal(self):
        """Build the TRPO graph: surrogate loss, KL, entropy, Fisher-vector
        product, flat weight accessors and the value-function optimizer."""
        # placeholders fed from collected rollouts
        self.targets = {
            "advantage": tf.placeholder(dtype=tf.float32, shape=[None]),
            "return": tf.placeholder(dtype=tf.float32, shape=[None]),
            "flat_tangent": tf.placeholder(dtype=tf.float32, shape=[None])
        }
        for i in range(len(self.n_actions)):
            self.targets["action_{}".format(i)] = tf.placeholder(dtype=tf.int32, shape=[None])
            self.targets["old_dist_{}".format(i)] = tf.placeholder(dtype=tf.float32, shape=[None, self.n_actions[i]])
        N = tf.shape(self.targets["advantage"])[0]
        base = [N] + [1 for _ in range(len(self.n_actions))]
        # joint log-distributions accumulated over the factored action heads
        log_dist = tf.zeros(shape=[N] + self.n_actions)
        old_log_dist = tf.zeros(shape=[N] + self.n_actions)
        p_n = tf.zeros(shape=[N])
        old_p_n = tf.zeros(shape=[N])
        for i, n in enumerate(self.n_actions):
            right_shape = base[:]
            right_shape[i + 1] = n
            actions = self.targets["action_{}".format(i)]
            action_log_dist = tf.reshape(self.action_logprobs[i], [-1])
            # log-prob of the action actually taken, per sample
            p = tf.reshape(tf.gather(action_log_dist, tf.range(0, N) * n + actions), [-1])
            old_action_logdist = tf.reshape(self.targets["old_dist_{}".format(i)], [-1])
            old_p = tf.reshape(tf.gather(old_action_logdist, tf.range(0, N) * n + actions), [-1])
            p_n += p
            old_p_n += old_p
            log_dist += tf.reshape(action_log_dist, right_shape)
            old_log_dist += tf.reshape(old_action_logdist, right_shape)
        # importance ratio pi_new / pi_old per sample
        ratio = tf.exp(p_n - old_p_n)
        N = tf.cast(N, tf.float32)
        self.loss = -tf.reduce_mean(ratio * self.targets["advantage"])
        self.KL = tf.reduce_sum(tf.exp(old_log_dist) * (old_log_dist - log_dist)) / N
        self.entropy = tf.reduce_sum(-tf.exp(log_dist) * log_dist) / N
        self.policy_grad = hlp.flatgrad(self.loss, self.weights)
        # KL with the first argument held fixed; its gradient yields the
        # Fisher-vector product needed by conjugate gradient
        KL_firstfixed = tf.reduce_sum(tf.stop_gradient(tf.exp(log_dist)) * (tf.stop_gradient(log_dist) - log_dist)) / N
        kl_ff_grads = tf.gradients(KL_firstfixed, self.weights)
        w_shapes = list(map(hlp.var_shape, self.weights))
        start = 0
        tangents = []
        # slice the flat tangent vector back into per-weight tensors
        for shape in w_shapes:
            size = np.prod(shape)
            param = tf.reshape(self.targets["flat_tangent"][start:(start + size)], shape)
            tangents.append(param)
            start += size
        gvp = [tf.reduce_sum(g * t) for (g, t) in zip(kl_ff_grads, tangents)]
        self.fisher_vector_product = hlp.flatgrad(gvp, self.weights)
        self.get_flat = hlp.GetFlat(self.weights, self.sess)
        self.set_from_flat = hlp.SetFromFlat(self.weights, self.sess)
        value_loss = tf.reduce_mean((self.targets["return"] - self.value) ** 2)
        self.value_train_op = tf.train.AdamOptimizer(0.05).minimize(value_loss, var_list=self.value_weights)

    def save(self, name):
        """Snapshot all TF variables, running stats and score history under
        saves/<name>/iteration_<timestep>/."""
        directory = 'saves/' + name + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        directory += 'iteration_{}'.format(self.timestep) + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        for i, tensor in enumerate(tf.global_variables()):
            value = self.sess.run(tensor)
            np.save(directory + 'weight_{}'.format(i), value)
        if self.scale != 'off':
            np.save(directory + 'sums', self.sums)
            np.save(directory + 'sumsquares', self.sumsqrs)
            np.save(directory + 'sumtime', self.sumtime)
        np.save(directory + 'timestep', np.array([self.timestep]))
        np.save(directory + 'train_scores', np.array(self.train_scores))
        np.save(directory + 'test_scores', np.array(self.test_scores))
        print("Agent successfully saved in folder {}".format(directory))

    def load(self, name, iteration=None):
        """Restore variables/stats written by save(); defaults to the
        highest-numbered iteration folder."""
        try:
            directory = 'saves/' + name + '/'
            if not os.path.exists(directory):
                print('That directory does not exist!')
                raise Exception
            if iteration is None:
                # pick the newest iteration_<N> subfolder
                iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])
            directory += 'iteration_{}'.format(iteration) + '/'
            for i, tensor in enumerate(tf.global_variables()):
                arr = np.load(directory + 'weight_{}.npy'.format(i))
                self.sess.run(tensor.assign(arr))
            if self.scale != 'off':
                self.sums = np.load(directory + 'sums.npy')
                self.sumsqrs = np.load(directory + 'sumsquares.npy')
                self.sumtime = np.load(directory + 'sumtime.npy')
            self.timestep = np.load(directory + 'timestep.npy')[0]
            self.train_scores = np.load(directory + 'train_scores.npy').tolist()
            self.test_scores = np.load(directory + 'test_scores.npy').tolist()
            print("Agent successfully loaded from folder {}".format(directory))
        except:
            # deliberately broad: any failure simply leaves the agent un-loaded
            print("Something is wrong, loading failed")

    def init_weights(self):
        """Initialize TF variables and propagate them through set_weights()."""
        self.sess.run(tf.global_variables_initializer())
        init_weights = [self.sess.run(w) for w in self.weights]
        self.set_weights(init_weights)

    def make_rollout(self):
        """Sample paths from the environment (worker side).

        Pulls normalization stats and current weights from redis when
        available, then collects episodes until either the per-worker
        timestep budget or the task count is exhausted. Results go to
        redis (distributed) or self.paths (local).
        """
        variables_server = Redis(port=12000)
        if self.scale != 'off':
            try:
                means = hlp.load_object(variables_server.get("means"))
                stds = hlp.load_object(variables_server.get("stds"))
                self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
            except:
                # stats may not be published yet; keep current normalization
                pass
        try:
            weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
                       range(len(self.weights))]
            self.set_weights(weights)
        except:
            # weights may not be published yet; keep current weights
            pass
        env = self.env
        if self.test_mode:
            # evaluation: fixed number of episodes, effectively no step limit
            n_tasks = self.n_tests
            timesteps_per_worker = 100000000
        else:
            # training: bounded by the per-worker share of the batch
            n_tasks = 10000
            timesteps_per_worker = self.timesteps_per_batch // self.n_workers
        timestep = 0
        i_task = 0
        paths = []
        while timestep < timesteps_per_worker and i_task < n_tasks:
            path = {}
            observations, action_tuples, rewards, dist_tuples, timestamps = [], [], [], [], []
            sums = np.zeros((1, env.get_observation_space()))
            sumsqrs = np.zeros(sums.shape)
            env.reset()
            while not env.done and env.timestamp < self.timesteps_per_launch:
                sums += env.features
                sumsqrs += np.square(env.features)
                observations.append(env.features[0])
                timestamps.append(env.timestamp)
                if not self.test_mode:
                    # stochastic action + distribution (needed for TRPO update)
                    actions, dist_tuple = self.act(env.features, return_dists=True)
                    dist_tuples.append(dist_tuple)
                else:
                    # deterministic action for evaluation
                    actions = self.act(env.features, exploration=False)
                env.step(actions)
                timestep += 1
                action_tuples.append(actions)
                rewards.append(env.reward)
            path["observations"] = np.array(observations)
            path["action_tuples"] = np.array(action_tuples)
            path["rewards"] = np.array(rewards)
            if not self.test_mode:
                path["dist_tuples"] = np.array(dist_tuples)
            path["timestamps"] = np.array(timestamps)
            path["sumobs"] = sums
            path["sumsqrobs"] = sumsqrs
            path["terminated"] = env.done
            path["total"] = env.get_total_reward()
            paths.append(path)
            i_task += 1
        if self.distributed:
            variables_server.set("paths_{}".format(self.id_worker), hlp.dump_object(paths))
        else:
            self.paths = paths

    def train(self):
        """Main TRPO loop: rollout, advantage estimation, value-function
        updates, conjugate-gradient step with line search, then evaluation
        and periodic checkpointing. Runs forever."""
        # start the redis server the workers communicate through
        cmd_server = 'redis-server --port 12000'
        p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)
        self.variables_server = Redis(port=12000)
        means = "-"
        stds = "-"
        if self.scale != 'off':
            if self.timestep == 0:
                # fresh run: collect one batch just to measure feature stats
                print("Time to measure features!")
                if self.distributed:
                    worker_args = \
                        {
                            'config': self.config,
                            'test_mode': False,
                        }
                    hlp.launch_workers(worker_args, self.n_workers)
                    paths = []
                    for i in range(self.n_workers):
                        paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
                else:
                    self.test_mode = False
                    self.make_rollout()
                    paths = self.paths
                for path in paths:
                    self.sums += path["sumobs"]
                    self.sumsqrs += path["sumsqrobs"]
                    self.sumtime += path["observations"].shape[0]
            # (un)biased std estimate from the running sums
            stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
            means = self.sums / self.sumtime
            print("Init means: {}".format(means))
            print("Init stds: {}".format(stds))
            self.variables_server.set("means", hlp.dump_object(means))
            self.variables_server.set("stds", hlp.dump_object(stds))
            self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
        while True:
            print("Iteration {}".format(self.timestep))
            start_time = time.time()
            # ---- collect a training batch -------------------------------
            if self.distributed:
                weights = self.get_weights()
                for i, weight in enumerate(weights):
                    self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
                worker_args = \
                    {
                        'config': self.config,
                        'test_mode': False,
                    }
                hlp.launch_workers(worker_args, self.n_workers)
                paths = []
                for i in range(self.n_workers):
                    paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
            else:
                self.test_mode = False
                self.make_rollout()
                paths = self.paths
            observations = np.concatenate([path["observations"] for path in paths])
            actions = np.concatenate([path["action_tuples"] for path in paths])
            action_dists = []
            for _ in range(len(self.n_actions)):
                action_dists.append([])
            returns = []
            advantages = []
            # ---- per-path returns and GAE-style advantages --------------
            for path in paths:
                self.sums += path["sumobs"]
                self.sumsqrs += path["sumsqrobs"]
                self.sumtime += path["rewards"].shape[0]
                dists = path["dist_tuples"]
                for i in range(len(self.n_actions)):
                    action_dists[i] += [dist[i][0] for dist in dists]
                returns += hlp.discount(path["rewards"], self.gamma, path["timestamps"]).tolist()
                values = self.sess.run(self.value, feed_dict={self.state_input: path["observations"]})
                # bootstrap with the last value unless the episode terminated
                values = np.append(values, 0 if path["terminated"] else values[-1])
                deltas = (path["rewards"] + self.gamma * values[1:] - values[:-1])
                advantages += hlp.discount(deltas, self.gamma, path["timestamps"]).tolist()
            returns = np.array(returns)
            advantages = np.array(advantages)
            # ---- advantage normalization --------------------------------
            if self.normalize == 'ranks':
                # rank-transform advantages to [-0.5, 0.5]
                ranks = np.zeros_like(advantages)
                ranks[np.argsort(advantages)] = np.arange(ranks.shape[0], dtype=np.float32) / (ranks.shape[0] - 1)
                ranks -= 0.5
                advantages = ranks[:]
            elif self.normalize == 'center':
                advantages -= np.mean(advantages)
                advantages /= (np.std(advantages, ddof=1) + 0.001)
            feed_dict = {self.state_input: observations,
                         self.targets["return"]: returns,
                         self.targets["advantage"]: advantages}
            for i in range(len(self.n_actions)):
                feed_dict[self.targets["old_dist_{}".format(i)]] = np.array(action_dists[i])
                feed_dict[self.targets["action_{}".format(i)]] = actions[:, i]
            # ---- value function updates ---------------------------------
            for i in range(self.value_updates):
                self.sess.run(self.value_train_op, feed_dict)
            train_rewards = np.array([path["rewards"].sum() for path in paths])
            train_lengths = np.array([len(path["rewards"]) for path in paths])
            # ---- TRPO policy update (CG + line search) ------------------
            thprev = self.get_flat()

            def fisher_vector_product(p):
                # damped Fisher-vector product (0.1 * p is CG damping)
                feed_dict[self.targets["flat_tangent"]] = p
                return self.sess.run(self.fisher_vector_product, feed_dict) + 0.1 * p

            g = self.sess.run(self.policy_grad, feed_dict)
            stepdir = hlp.conjugate_gradient(fisher_vector_product, -g)
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            # scale the step so the quadratic KL estimate equals max_kl
            lm = np.sqrt(shs / self.max_kl)
            fullstep = stepdir / (lm + 1e-18)

            def loss_kl(th):
                self.set_from_flat(th)
                return self.sess.run([self.loss, self.KL], feed_dict=feed_dict)

            theta = hlp.linesearch(loss_kl, thprev, fullstep, self.max_kl)
            self.set_from_flat(theta)
            lossafter, kloldnew = self.sess.run([self.loss, self.KL], feed_dict=feed_dict)
            # ---- evaluation rollouts ------------------------------------
            print("Time for testing!")
            if self.distributed:
                weights = self.get_weights()
                for i, weight in enumerate(weights):
                    self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
                worker_args = \
                    {
                        'config': self.config,
                        'test_mode': True,
                    }
                hlp.launch_workers(worker_args, self.n_workers)
                paths = []
                for i in range(self.n_workers):
                    paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
            else:
                self.test_mode = True
                self.make_rollout()
                paths = self.paths
            total_rewards = np.array([path["total"] for path in paths])
            eplens = np.array([len(path["rewards"]) for path in paths])
            # NOTE(review): this condition is `!= 'full'` while the others
            # use `!= 'off'` — confirm whether that asymmetry is intended
            if self.scale != 'full':
                stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
                means = self.sums / self.sumtime
                self.variables_server.set("means", hlp.dump_object(means))
                self.variables_server.set("stds", hlp.dump_object(stds))
                self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
            print("""
-------------------------------------------------------------
Mean test score: {test_scores}
Mean train score: {train_scores}
Mean test episode length: {test_eplengths}
Mean train episode length: {train_eplengths}
Max test score: {max_test}
Max train score: {max_train}
KL between old and new {kl}
Loss after update {loss}
Mean of features: {means}
Std of features: {stds}
-------------------------------------------------------------
""".format(
                means=means,
                stds=stds,
                test_scores=np.mean(total_rewards),
                test_eplengths=np.mean(eplens),
                train_scores=np.mean(train_rewards),
                train_eplengths=np.mean(train_lengths),
                max_test=np.max(total_rewards),
                max_train=np.max(train_rewards),
                kl=kloldnew,
                loss=lossafter
            ))
            self.timestep += 1
            self.train_scores.append(np.mean(train_rewards))
            self.test_scores.append(np.mean(total_rewards))
            if self.timestep % self.save_every == 0:
                self.save(self.config[:-5])
| {
"content_hash": "bb3c62af0cb7aa1f4fce85f3b407f1a9",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 119,
"avg_line_length": 42.87073170731707,
"alnum_prop": 0.5309779825908858,
"repo_name": "Fritz449/SRLF",
"id": "5f0772280cac7d1dd106e4534254b873b3a65efb",
"size": "17577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algos/trpo_discrete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "180431"
}
],
"symlink_target": ""
} |
class kit:
    """Grab-bag of PHP-style helper functions (names mirror PHP builtins)."""

    def __init__(self):
        # placeholder flag kept for backward compatibility
        self.form_status = False

    def pwd(self):
        """Return the current working directory."""
        import os
        return os.getcwd()

    def is_win(self):
        """Return True when running on Windows."""
        import sys
        return hasattr(sys, 'getwindowsversion')

    def SP(self):
        """Return the platform path separator."""
        return "\\" if self.is_win() else "/"

    def strlen(self, x):
        """Length of a string (or any sized object)."""
        return len(x)

    def trim(self, input):
        """Strip leading/trailing whitespace (coerces input to str first)."""
        return str(input).strip()

    def str_replace(self, findwords, replace_words, data):
        """Replace every occurrence of findwords in data."""
        return data.replace(findwords, replace_words)

    def copy(self, source_file, target_file):
        """Copy a file (shutil.copy semantics: data + permission bits)."""
        import shutil
        shutil.copy(source_file, target_file)

    def utf8tobig5(self, data):
        """Encode a str to Big5 bytes (raises UnicodeEncodeError on failure)."""
        return data.encode('big5')

    def is_dir(self, pathname):
        """True when pathname is an existing directory."""
        import os
        return os.path.isdir(pathname)

    def is_file(self, filename):
        """True when filename is an existing regular file."""
        import os
        return os.path.isfile(filename)

    def unlink(self, filename):
        """Delete filename if it exists as a regular file; no-op otherwise."""
        import os
        if self.is_file(filename):
            os.remove(filename)

    def file_get_contents(self, data_path):
        """Read a text file, trying UTF-8 first then the platform default.

        Uses context managers so the file handle is always closed (the old
        code leaked both handles) and only retries on a decode failure.
        """
        try:
            with open(data_path, encoding='utf8') as f:
                return f.read()
        except UnicodeDecodeError:
            with open(data_path) as f:
                return f.read()

    def file_put_contents(self, filename, data, IS_APPEND=False):
        """Write (or append, when IS_APPEND) data to filename.

        Text/binary mode is chosen from data's type. Bug fix: the old code
        always opened 'wb' for overwrite, which raised TypeError whenever
        data was a str, while append mode was text-only.
        """
        mode = 'a' if IS_APPEND else 'w'
        if isinstance(data, (bytes, bytearray)):
            mode += 'b'
        with open(filename, mode) as f:
            f.write(data)

    def exit(self):
        """Terminate the process with status 0."""
        import sys
        sys.exit(0)

    def echo(self, data):
        """Write data to stdout without a trailing newline."""
        import sys
        sys.stdout.write("%s" % data)

    def strtoupper(self, data):
        return data.upper()

    def strtolower(self, data):
        return data.lower()

    def explode(self, sep, data):
        """Split data on sep (PHP explode)."""
        return data.split(sep)

    def implode(self, sep, arr):
        """Join arr with sep (PHP implode)."""
        return sep.join(arr)

    def json_decode(self, data):
        import json
        return json.loads(data)

    def json_encode(self, dict_data):
        import json
        return json.dumps(dict_data)

    def json_encode_utf8(self, dict_data):
        """Like json_encode but keeps non-ASCII characters unescaped."""
        import json
        return json.dumps(dict_data, ensure_ascii=False)

    def json_format(self, json_data):
        """Re-serialize a JSON string pretty-printed with sorted keys."""
        import json
        return json.dumps(self.json_decode(json_data), indent=4, sort_keys=True)

    def json_format_utf8(self, json_data):
        """Like json_format but keeps non-ASCII characters unescaped."""
        import json
        return json.dumps(self.json_decode(json_data), indent=4, sort_keys=True,
                          ensure_ascii=False)

    def array_unique(self, arr):
        """Return arr without duplicates, preserving first-seen order."""
        out_list = []
        for val in arr:
            if val not in out_list:
                out_list.append(val)
        return out_list

    def is_string_like(self, data, find_string):
        """True when find_string occurs anywhere in data."""
        return data.find(find_string) != -1

    def python_version(self):
        """Return the major Python version (2 or 3)."""
        import sys
        return 3 if sys.version_info[0] > 2 else 2
"content_hash": "2d37f3f6225c8f00e7c7e191ae9401a1",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 137,
"avg_line_length": 31.867346938775512,
"alnum_prop": 0.5462696125520333,
"repo_name": "shadowjohn/UCL_LIU",
"id": "72e45ee561558d8503dec22711e1a0725c2c6412",
"size": "3547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linux/php.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "790"
},
{
"name": "C",
"bytes": "253678"
},
{
"name": "HTML",
"bytes": "89"
},
{
"name": "PHP",
"bytes": "1751"
},
{
"name": "Python",
"bytes": "2747624"
},
{
"name": "SWIG",
"bytes": "17228"
}
],
"symlink_target": ""
} |
from src.infrastructure.singleton import Singleton
from src.resource.clusterresourcemanager import ClusterResourceManager
from src.resource.storagedomainresourcemanager import StorageDomainResourceManager
from src.resource.hostresourcemanager import HostResourceManager
# @singleton
class ResourceManagersContainer(Singleton):
    '''
    ResourceManagersContainer container provides ResourceManager(s)
    hosting services
    '''

    # Shared manager instances, created once at class-definition (import) time.
    # Name mangling makes these private to the class; access goes through the
    # static getters below.
    __clusterResourceManager = ClusterResourceManager()
    __StorageDomainResourceManager = StorageDomainResourceManager()
    __hostResourceManager = HostResourceManager()

    @staticmethod
    def getHostResourceManager():
        """
        @return: HostResourceManager
        """
        return ResourceManagersContainer.__hostResourceManager

    @staticmethod
    def getStorageDomainResourceManager():
        """
        @return: StorageDomainResourceManager
        """
        return ResourceManagersContainer.__StorageDomainResourceManager

    @staticmethod
    def getClusterResourceManager():
        """
        @return: ClusterResourceManager
        """
        return ResourceManagersContainer.__clusterResourceManager
| {
"content_hash": "b7399e4f9bc1058674cef75d91292b0e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 82,
"avg_line_length": 32.638888888888886,
"alnum_prop": 0.7463829787234042,
"repo_name": "oVirt/ovirt-engine-sdk-tests",
"id": "9022ab795dfa9ab76d40cfbb9e2d8de5fd62da91",
"size": "1766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/resource/resourcemanagerscontainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88591"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from gaegraph.business_base import NodeSearch, DeleteNode
from produto_app.commands import ListProdutoCommand, SaveProdutoCommand, UpdateProdutoCommand, \
ProdutoPublicForm, ProdutoDetailForm, ProdutoShortForm
def save_produto_cmd(**produto_properties):
    """
    Command to save a Produto entity
    :param produto_properties: a dict of properties to save on the model
    :return: a Command that saves a Produto, validating and localizing properties received as strings
    """
    return SaveProdutoCommand(**produto_properties)
def update_produto_cmd(produto_id, **produto_properties):
    """
    Command to update the Produto entity whose id equals 'produto_id'
    :param produto_id: id of the Produto to update
    :param produto_properties: a dict of properties to update on the model
    :return: a Command that updates the Produto, validating and localizing properties received as strings
    """
    return UpdateProdutoCommand(produto_id, **produto_properties)
def list_produtos_cmd():
    """
    Command to list Produto entities ordered by their creation dates
    :return: a Command that performs the db operations when executed
    """
    return ListProdutoCommand()
def produto_detail_form(**kwargs):
    """
    Function to get Produto's detail form.
    :param kwargs: form properties
    :return: Form
    """
    return ProdutoDetailForm(**kwargs)
def produto_short_form(**kwargs):
    """
    Function to get Produto's short form: just a subset of Produto's properties.
    :param kwargs: form properties
    :return: Form
    """
    return ProdutoShortForm(**kwargs)
def produto_public_form(**kwargs):
    """
    Function to get Produto's public form: just a subset of Produto's properties.
    :param kwargs: form properties
    :return: Form
    """
    return ProdutoPublicForm(**kwargs)
def get_produto_cmd(produto_id):
    """
    Find a Produto by its id
    :param produto_id: the Produto id
    :return: Command
    """
    return NodeSearch(produto_id)
def delete_produto_cmd(produto_id):
    """
    Construct a command to delete a Produto
    :param produto_id: the Produto's id
    :return: Command
    """
    return DeleteNode(produto_id)
| {
"content_hash": "ecb436d5a5a99d104c014880b2ea6473",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 100,
"avg_line_length": 28.893333333333334,
"alnum_prop": 0.7092754960775265,
"repo_name": "marcosxddh/aula_script",
"id": "bc902be43543bcb409da304a654f435af8938931",
"size": "2191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/apps/produto_app/facade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42358"
},
{
"name": "C++",
"bytes": "3500"
},
{
"name": "CSS",
"bytes": "128629"
},
{
"name": "JavaScript",
"bytes": "4226"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "94796"
},
{
"name": "Shell",
"bytes": "4168"
}
],
"symlink_target": ""
} |
import muffin

app = muffin.Application("web")


@app.register("/text")
def text(request):
    """Plain-text benchmark endpoint; returns a constant greeting."""
    return "Hello, World!"
| {
"content_hash": "d6b3e6592560c8a7ab15aca13d9640e9",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 31,
"avg_line_length": 14.75,
"alnum_prop": 0.6779661016949152,
"repo_name": "timothycrosley/hug",
"id": "88710fcb282b2b4b7ff9db126f1f3d79cce6509b",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "benchmarks/http/muffin_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "479"
},
{
"name": "HTML",
"bytes": "58"
},
{
"name": "Python",
"bytes": "444815"
},
{
"name": "Shell",
"bytes": "6103"
}
],
"symlink_target": ""
} |
import datetime
import itertools
import logging
import os
import pathlib
import platform
import subprocess
import sys
import tempfile
import time
from typing import List, NoReturn, Optional
import click
import requests
from datahub.cli.docker_check import (
check_local_docker_containers,
get_client_with_error,
)
from datahub.ingestion.run.pipeline import Pipeline
from datahub.telemetry import telemetry
logger = logging.getLogger(__name__)

# Repo-relative compose files for the three quickstart flavours
# (neo4j+elastic, elastic-only, and the M1/arm64 variant).
NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE = (
    "docker/quickstart/docker-compose.quickstart.yml"
)
ELASTIC_QUICKSTART_COMPOSE_FILE = (
    "docker/quickstart/docker-compose-without-neo4j.quickstart.yml"
)
M1_QUICKSTART_COMPOSE_FILE = (
    "docker/quickstart/docker-compose-without-neo4j-m1.quickstart.yml"
)
BOOTSTRAP_MCES_FILE = "metadata-ingestion/examples/mce_files/bootstrap_mce.json"

# Raw-content URLs for fetching the same files straight from GitHub master.
GITHUB_BASE_URL = "https://raw.githubusercontent.com/linkedin/datahub/master"
GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL = (
    f"{GITHUB_BASE_URL}/{NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL = (
    f"{GITHUB_BASE_URL}/{ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_M1_QUICKSTART_COMPOSE_URL = f"{GITHUB_BASE_URL}/{M1_QUICKSTART_COMPOSE_FILE}"
GITHUB_BOOTSTRAP_MCES_URL = f"{GITHUB_BASE_URL}/{BOOTSTRAP_MCES_FILE}"
@click.group()
def docker() -> None:
    """Helper commands for setting up and interacting with a local
    DataHub instance using Docker."""
def _print_issue_list_and_exit(
    issues: List[str], header: str, footer: Optional[str] = None
) -> NoReturn:
    """Print a highlighted header, one bullet per issue, an optional footer,
    and terminate the process with exit code 1."""
    click.secho(header, fg="bright_red")
    for problem in issues:
        click.echo(f"- {problem}")
    if footer:
        click.echo()
        click.echo(footer)
    sys.exit(1)
def docker_check_impl() -> None:
    """Run the local container health check and report the outcome.

    Exits the process (via _print_issue_list_and_exit) when problems are found.
    """
    issues = check_local_docker_containers()
    if issues:
        _print_issue_list_and_exit(issues, "The following issues were detected:")
    else:
        click.secho("✔ No issues detected", fg="green")
@docker.command()
@telemetry.with_telemetry
def check() -> None:
    """Check that the Docker containers are healthy"""
    # Thin CLI wrapper: the shared implementation is also used by quickstart
    # for its pre-flight and readiness checks.
    docker_check_impl()
def is_m1() -> bool:
    """Check whether we are running on an M1 machine"""
    try:
        # Query the platform once and test both attributes of the result.
        uname = platform.uname()
        return uname.system == "Darwin" and uname.machine == "arm64"
    except Exception:
        # Catch-all
        return False
def should_use_neo4j_for_graph_service(graph_service_override: Optional[str]) -> bool:
    """Decide whether quickstart should run neo4j as the graph backend.

    An explicit override wins; otherwise the presence of an existing
    Datahub neo4j docker volume determines the choice.
    """
    if graph_service_override is not None:
        if graph_service_override == "elasticsearch":
            click.echo("Starting with elasticsearch due to graph-service-impl param\n")
            return False
        elif graph_service_override == "neo4j":
            click.echo("Starting with neo4j due to graph-service-impl param\n")
            return True
        # Anything other than the two supported backends is an error.
        click.secho(
            graph_service_override
            + " is not a valid graph service option. Choose either `neo4j` or "
            "`elasticsearch`\n",
            fg="red",
        )
        raise ValueError(f"invalid graph service option: {graph_service_override}")
    with get_client_with_error() as (client, error):
        if error:
            click.secho(
                "Docker doesn't seem to be running. Did you start it?", fg="red"
            )
            raise error
        neo4j_volumes = client.volumes.list(filters={"name": "datahub_neo4jdata"})
        if neo4j_volumes:
            click.echo(
                "Datahub Neo4j volume found, starting with neo4j as graph service.\n"
                "If you want to run using elastic, run `datahub docker nuke` and re-ingest your data.\n"
            )
            return True
        click.echo(
            "No Datahub Neo4j volume found, starting with elasticsearch as graph service.\n"
            "To use neo4j as a graph backend, run \n"
            "`datahub docker quickstart --quickstart-compose-file ./docker/quickstart/docker-compose.quickstart.yml`"
            "\nfrom the root of the datahub repo\n"
        )
        return False
@docker.command()
@click.option(
    "--version",
    type=str,
    default="head",
    help="Datahub version to be deployed. If not set, deploy latest",
)
@click.option(
    "--build-locally",
    type=bool,
    is_flag=True,
    default=False,
    help="Attempt to build the containers locally before starting",
)
@click.option(
    "--quickstart-compose-file",
    type=click.Path(exists=True, dir_okay=False, readable=True),
    default=[],
    multiple=True,
    help="Use a local docker-compose file instead of pulling from GitHub",
)
@click.option(
    "--dump-logs-on-failure",
    type=bool,
    is_flag=True,
    default=False,
    help="If true, the docker-compose logs will be printed to console if something fails",
)
@click.option(
    "--graph-service-impl",
    type=str,
    is_flag=False,
    default=None,
    help="If set, forces docker-compose to use that graph service implementation",
)
@telemetry.with_telemetry
def quickstart(
    version: str,
    build_locally: bool,
    quickstart_compose_file: List[pathlib.Path],
    dump_logs_on_failure: bool,
    graph_service_impl: Optional[str],
) -> None:
    """Start an instance of DataHub locally using docker-compose.
    This command will automatically download the latest docker-compose configuration
    from GitHub, pull the latest images, and bring up the DataHub system.
    There are options to override the docker-compose config file, build the containers
    locally, and dump logs to the console or to a file if something goes wrong.
    """
    running_on_m1 = is_m1()
    if running_on_m1:
        click.echo("Detected M1 machine")
    # Run pre-flight checks.
    issues = check_local_docker_containers(preflight_only=True)
    if issues:
        _print_issue_list_and_exit(issues, "Unable to run quickstart:")
    quickstart_compose_file = list(
        quickstart_compose_file
    ) # convert to list from tuple
    # No compose file supplied on the CLI: pick the right flavour and fetch it
    # from GitHub into a temp file that is appended to the compose-file list.
    if not quickstart_compose_file:
        should_use_neo4j = should_use_neo4j_for_graph_service(graph_service_impl)
        if should_use_neo4j and running_on_m1:
            click.secho(
                "Running with neo4j on M1 is not currently supported, will be using elasticsearch as graph",
                fg="red",
            )
        # Nested conditional expression: neo4j compose only off-M1; on M1 the
        # dedicated M1 (elasticsearch) compose file is always used.
        github_file = (
            GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL
            if should_use_neo4j and not running_on_m1
            else GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL
            if not running_on_m1
            else GITHUB_M1_QUICKSTART_COMPOSE_URL
        )
        # delete=False: the file must outlive this `with` block because
        # docker-compose reads it later in this function.
        with tempfile.NamedTemporaryFile(suffix=".yml", delete=False) as tmp_file:
            path = pathlib.Path(tmp_file.name)
            quickstart_compose_file.append(path)
            click.echo(f"Fetching docker-compose file {github_file} from GitHub")
            # Download the quickstart docker-compose file from GitHub.
            quickstart_download_response = requests.get(github_file)
            quickstart_download_response.raise_for_status()
            tmp_file.write(quickstart_download_response.content)
            logger.debug(f"Copied to {path}")
    # set version
    os.environ["DATAHUB_VERSION"] = version
    # Common docker-compose invocation prefix: one -f per compose file,
    # all under the fixed "datahub" project name.
    base_command: List[str] = [
        "docker-compose",
        *itertools.chain.from_iterable(
            ("-f", f"{path}") for path in quickstart_compose_file
        ),
        "-p",
        "datahub",
    ]
    # Pull and possibly build the latest containers.
    subprocess.run(
        [
            *base_command,
            "pull",
        ],
        check=True,
    )
    if build_locally:
        subprocess.run(
            [
                *base_command,
                "build",
                "--pull",
            ],
            check=True,
            env={
                **os.environ,
                "DOCKER_BUILDKIT": "1",
            },
        )
    # Start it up! (with retries)
    max_wait_time = datetime.timedelta(minutes=6)
    start_time = datetime.datetime.now()
    sleep_interval = datetime.timedelta(seconds=2)
    up_interval = datetime.timedelta(seconds=30)
    up_attempts = 0
    while (datetime.datetime.now() - start_time) < max_wait_time:
        # Attempt to run docker-compose up every minute.
        if (datetime.datetime.now() - start_time) > up_attempts * up_interval:
            click.echo()
            # Deliberately no check=True: a failed `up` is retried below.
            subprocess.run(base_command + ["up", "-d", "--remove-orphans"])
            up_attempts += 1
        # Check docker health every few seconds.
        issues = check_local_docker_containers()
        if not issues:
            break
        # Wait until next iteration.
        click.echo(".", nl=False)
        time.sleep(sleep_interval.total_seconds())
    else:
        # Falls through if the while loop doesn't exit via break.
        click.echo()
        # Timed out: capture docker-compose logs to a persistent temp file
        # so the user can attach them to a bug report.
        with tempfile.NamedTemporaryFile(suffix=".log", delete=False) as log_file:
            ret = subprocess.run(
                base_command + ["logs"],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True,
            )
            log_file.write(ret.stdout)
        if dump_logs_on_failure:
            with open(log_file.name, "r") as logs:
                click.echo("Dumping docker-compose logs:")
                click.echo(logs.read())
                click.echo()
        _print_issue_list_and_exit(
            issues,
            header="Unable to run quickstart - the following issues were detected:",
            footer="If you think something went wrong, please file an issue at https://github.com/linkedin/datahub/issues\n"
            "or send a message in our Slack https://slack.datahubproject.io/\n"
            f"Be sure to attach the logs from {log_file.name}",
        )
    # Handle success condition.
    click.echo()
    click.secho("✔ DataHub is now running", fg="green")
    click.secho(
        "Ingest some demo data using `datahub docker ingest-sample-data`,\n"
        "or head to http://localhost:9002 (username: datahub, password: datahub) to play around with the frontend.",
        fg="green",
    )
    click.secho(
        "Need support? Get in touch on Slack: https://slack.datahubproject.io/",
        fg="magenta",
    )
@docker.command()
@click.option(
    "--path",
    type=click.Path(exists=True, dir_okay=False),
    help=f"The MCE json file to ingest. Defaults to downloading {BOOTSTRAP_MCES_FILE} from GitHub",
)
@telemetry.with_telemetry
def ingest_sample_data(path: Optional[str]) -> None:
    """Ingest sample data into a running DataHub instance."""
    if path is None:
        # No local file given: fetch the bootstrap MCE payload from GitHub
        # into a temp file that survives this block (delete=False).
        click.echo("Downloading sample data...")
        with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp_file:
            path = str(pathlib.Path(tmp_file.name))
            response = requests.get(GITHUB_BOOTSTRAP_MCES_URL)
            response.raise_for_status()
            tmp_file.write(response.content)
        click.echo(f"Downloaded to {path}")
    # Verify that docker is up.
    issues = check_local_docker_containers()
    if issues:
        _print_issue_list_and_exit(
            issues,
            header="Docker is not ready:",
            footer="Try running `datahub docker quickstart` first",
        )
    # Run ingestion.
    click.echo("Starting ingestion...")
    recipe = {
        "source": {
            "type": "file",
            "config": {
                "filename": path,
            },
        },
        "sink": {
            "type": "datahub-rest",
            "config": {"server": "http://localhost:8080"},
        },
    }
    pipeline = Pipeline.create(recipe)
    pipeline.run()
    sys.exit(pipeline.pretty_print_summary())
@docker.command()
@telemetry.with_telemetry
@click.option(
    "--keep-data",
    type=bool,
    is_flag=True,
    default=False,
    # BUG FIX: the previous help text ("Delete data volumes") described the
    # opposite of what this flag does — setting it SKIPS volume deletion.
    help="If set, keep the data volumes instead of deleting them",
)
def nuke(keep_data: bool) -> None:
    """Remove all Docker containers, networks, and volumes associated with DataHub."""
    with get_client_with_error() as (client, error):
        if error:
            click.secho(
                "Docker doesn't seem to be running. Did you start it?", fg="red"
            )
            return
        # Containers are always removed, together with their anonymous volumes.
        click.echo("Removing containers in the datahub project")
        for container in client.containers.list(
            all=True, filters={"label": "com.docker.compose.project=datahub"}
        ):
            container.remove(v=True, force=True)
        # Named data volumes are only removed unless --keep-data was passed.
        if keep_data:
            click.echo("Skipping deleting data volumes in the datahub project")
        else:
            click.echo("Removing volumes in the datahub project")
            for volume in client.volumes.list(
                filters={"label": "com.docker.compose.project=datahub"}
            ):
                volume.remove(force=True)
        click.echo("Removing networks in the datahub project")
        for network in client.networks.list(
            filters={"label": "com.docker.compose.project=datahub"}
        ):
            network.remove()
| {
"content_hash": "172adf55c6c63202c799a96ae9c2aeec",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 124,
"avg_line_length": 32.83168316831683,
"alnum_prop": 0.611881785283474,
"repo_name": "linkedin/WhereHows",
"id": "c94f50dd2e5fe8c441422c4e4ff3483602c3a5ae",
"size": "13268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/src/datahub/cli/docker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
} |
import socket
import datetime
import random
from future.utils import iteritems
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.file_spec import FileSpec
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvesterbody.agent_base import AgentBase
from pandaharvester.harvestercore.plugin_factory import PluginFactory
# logger
# Module-level logger shared by all JobFetcher instances.
_logger = core_utils.setup_logger('job_fetcher')
# class to fetch jobs
class JobFetcher(AgentBase):
    """Agent that pulls new jobs from PanDA, converts them to JobSpec
    objects (including their input FileSpecs), and inserts them into the
    harvester database.
    """
    # constructor
    def __init__(self, communicator, queue_config_mapper, single_mode=False):
        # communicator: client used to talk to the PanDA server
        # queue_config_mapper: source of per-queue configuration
        # single_mode: passed through to AgentBase
        AgentBase.__init__(self, single_mode)
        self.dbProxy = DBProxy()
        self.communicator = communicator
        self.nodeName = socket.gethostname()
        self.queueConfigMapper = queue_config_mapper
        self.pluginFactory = PluginFactory()
    # main loop
    def run(self):
        """Main agent loop: fetch jobs per queue until terminated."""
        while True:
            mainLog = self.make_logger(_logger, 'id={0}'.format(self.get_pid()), method_name='run')
            mainLog.debug('getting number of jobs to be fetched')
            # get number of jobs to be fetched
            nJobsPerQueue = self.dbProxy.get_num_jobs_to_fetch(harvester_config.jobfetcher.nQueues,
                                                               harvester_config.jobfetcher.lookupTime)
            mainLog.debug('got {0} queues'.format(len(nJobsPerQueue)))
            # loop over all queues
            for queueName, nJobs in iteritems(nJobsPerQueue):
                # check queue
                if not self.queueConfigMapper.has_queue(queueName):
                    continue
                tmpLog = self.make_logger(_logger, 'queueName={0}'.format(queueName),
                                          method_name='run')
                # get queue
                queueConfig = self.queueConfigMapper.get_queue(queueName)
                # upper limit
                if nJobs > harvester_config.jobfetcher.maxJobs:
                    nJobs = harvester_config.jobfetcher.maxJobs
                # get jobs
                # prodSourceLabel is picked at random using optional
                # per-queue permille weights, falling back to the default.
                default_prodSourceLabel = queueConfig.get_source_label()
                pdpm = getattr(queueConfig, 'prodSourceLabelRandomWeightsPermille', {})
                choice_list = core_utils.make_choice_list(pdpm=pdpm, default=default_prodSourceLabel)
                prodSourceLabel = random.choice(choice_list)
                tmpLog.debug('getting {0} jobs for prodSourceLabel {1}'.format(nJobs, prodSourceLabel))
                sw = core_utils.get_stopwatch()
                siteName = queueConfig.siteName
                jobs, errStr = self.communicator.get_jobs(siteName, self.nodeName,
                                                          prodSourceLabel,
                                                          self.nodeName, nJobs,
                                                          queueConfig.getJobCriteria)
                tmpLog.info('got {0} jobs with {1} {2}'.format(len(jobs), errStr, sw.get_elapsed_time()))
                # convert to JobSpec
                if len(jobs) > 0:
                    # get extractor plugin
                    if hasattr(queueConfig, 'extractor'):
                        extractorCore = self.pluginFactory.get_plugin(queueConfig.extractor)
                    else:
                        extractorCore = None
                    jobSpecs = []
                    # fileStatMap caches per-LFN status lookups across jobs
                    # in this batch to avoid repeated DB queries.
                    fileStatMap = dict()
                    sw_startconvert = core_utils.get_stopwatch()
                    for job in jobs:
                        timeNow = datetime.datetime.utcnow()
                        jobSpec = JobSpec()
                        jobSpec.convert_job_json(job)
                        jobSpec.computingSite = queueName
                        jobSpec.status = 'starting'
                        jobSpec.subStatus = 'fetched'
                        jobSpec.creationTime = timeNow
                        jobSpec.stateChangeTime = timeNow
                        jobSpec.configID = queueConfig.configID
                        jobSpec.set_one_attribute('schedulerID',
                                                  'harvester-{0}'.format(harvester_config.master.harvester_id))
                        if queueConfig.zipPerMB is not None and jobSpec.zipPerMB is None:
                            jobSpec.zipPerMB = queueConfig.zipPerMB
                        # Regular input files plus, when an extractor plugin
                        # is configured, auxiliary inputs it extracts.
                        fileGroupDictList = [jobSpec.get_input_file_attributes()]
                        if extractorCore is not None:
                            fileGroupDictList.append(extractorCore.get_aux_inputs(jobSpec))
                        for fileGroupDict in fileGroupDictList:
                            for tmpLFN, fileAttrs in iteritems(fileGroupDict):
                                # check file status
                                if tmpLFN not in fileStatMap:
                                    fileStatMap[tmpLFN] = self.dbProxy.get_file_status(tmpLFN, 'input',
                                                                                       queueConfig.ddmEndpointIn,
                                                                                       'starting')
                                # make file spec
                                fileSpec = FileSpec()
                                fileSpec.PandaID = jobSpec.PandaID
                                fileSpec.taskID = jobSpec.taskID
                                fileSpec.lfn = tmpLFN
                                fileSpec.endpoint = queueConfig.ddmEndpointIn
                                fileSpec.scope = fileAttrs['scope']
                                # set preparing to skip stage-in if the file is (being) taken care of by another job
                                if 'ready' in fileStatMap[tmpLFN] or 'preparing' in fileStatMap[tmpLFN] \
                                        or 'to_prepare' in fileStatMap[tmpLFN]:
                                    fileSpec.status = 'preparing'
                                else:
                                    fileSpec.status = 'to_prepare'
                                # keep the cached status counters up to date
                                if fileSpec.status not in fileStatMap[tmpLFN]:
                                    fileStatMap[tmpLFN][fileSpec.status] = 0
                                fileStatMap[tmpLFN][fileSpec.status] += 1
                                if 'INTERNAL_FileType' in fileAttrs:
                                    fileSpec.fileType = fileAttrs['INTERNAL_FileType']
                                    jobSpec.auxInput = JobSpec.AUX_hasAuxInput
                                else:
                                    fileSpec.fileType = 'input'
                                if 'INTERNAL_URL' in fileAttrs:
                                    fileSpec.url = fileAttrs['INTERNAL_URL']
                                jobSpec.add_in_file(fileSpec)
                        jobSpec.trigger_propagation()
                        jobSpecs.append(jobSpec)
                    # insert to DB
                    tmpLog.debug("Converting of {0} jobs {1}".format(len(jobs),sw_startconvert.get_elapsed_time()))
                    sw_insertdb =core_utils.get_stopwatch()
                    self.dbProxy.insert_jobs(jobSpecs)
                    tmpLog.debug('Insert of {0} jobs {1}'.format(len(jobSpecs), sw_insertdb.get_elapsed_time()))
            mainLog.debug('done')
            # check if being terminated
            if self.terminated(harvester_config.jobfetcher.sleepTime):
                mainLog.debug('terminated')
                return
| {
"content_hash": "5152639690488a1985ee885553b07d1d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 116,
"avg_line_length": 58.09848484848485,
"alnum_prop": 0.5095840396401096,
"repo_name": "PanDAWMS/panda-harvester",
"id": "b0d7823b5276b2b55fdb0cad77d07e840b927a2c",
"size": "7669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandaharvester/harvesterbody/job_fetcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1650803"
},
{
"name": "Shell",
"bytes": "21117"
}
],
"symlink_target": ""
} |
import unittest
from food_chain import recite
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class FoodChainTest(unittest.TestCase):
    """Verse-by-verse checks for food_chain.recite(start, end).

    recite(n, n) is expected to return the lines of verse n as a list of
    strings; multi-verse calls join verses with an empty-string separator.
    """
    def test_fly(self):
        expected = [
            "I know an old lady who swallowed a fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(1, 1), expected)
    def test_spider(self):
        expected = [
            "I know an old lady who swallowed a spider.",
            "It wriggled and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(2, 2), expected)
    def test_bird(self):
        expected = [
            "I know an old lady who swallowed a bird.",
            "How absurd to swallow a bird!",
            "She swallowed the bird to catch the spider that "
            "wriggled and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(3, 3), expected)
    def test_cat(self):
        expected = [
            "I know an old lady who swallowed a cat.",
            "Imagine that, to swallow a cat!",
            "She swallowed the cat to catch the bird.",
            "She swallowed the bird to catch the spider that "
            "wriggled and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(4, 4), expected)
    def test_dog(self):
        expected = [
            "I know an old lady who swallowed a dog.",
            "What a hog, to swallow a dog!",
            "She swallowed the dog to catch the cat.",
            "She swallowed the cat to catch the bird.",
            "She swallowed the bird to catch the spider that wriggled "
            "and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(5, 5), expected)
    def test_goat(self):
        expected = [
            "I know an old lady who swallowed a goat.",
            "Just opened her throat and swallowed a goat!",
            "She swallowed the goat to catch the dog.",
            "She swallowed the dog to catch the cat.",
            "She swallowed the cat to catch the bird.",
            "She swallowed the bird to catch the spider that "
            "wriggled and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(6, 6), expected)
    def test_cow(self):
        expected = [
            "I know an old lady who swallowed a cow.",
            "I don't know how she swallowed a cow!",
            "She swallowed the cow to catch the goat.",
            "She swallowed the goat to catch the dog.",
            "She swallowed the dog to catch the cat.",
            "She swallowed the cat to catch the bird.",
            "She swallowed the bird to catch the spider that "
            "wriggled and jiggled and tickled inside her.",
            "She swallowed the spider to catch the fly.",
            "I don't know why she swallowed the fly. Perhaps she'll die.",
        ]
        self.assertEqual(recite(7, 7), expected)
    def test_horse(self):
        # The final verse is a short two-line ending.
        expected = [
            "I know an old lady who swallowed a horse.",
            "She's dead, of course!",
        ]
        self.assertEqual(recite(8, 8), expected)
    def test_multiple_verses(self):
        # Consecutive verses are separated by a single empty string.
        expected = recite(1, 1) + [""] + recite(2, 2) + [""] + recite(3, 3)
        self.assertEqual(recite(1, 3), expected)
    def test_full_song(self):
        # Build the whole song from single verses, dropping the trailing
        # separator added by the loop.
        expected = []
        for n in range(1, 9):
            expected += recite(n, n) + [""]
        expected.pop()
        self.assertEqual(recite(1, 8), expected)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0ef8830f2027bcf41bbf30dfeae52048",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 38.432432432432435,
"alnum_prop": 0.5672761368963901,
"repo_name": "N-Parsons/exercism-python",
"id": "e87b57ef96ae8fd116df77bcbbe70fc230fbbcb9",
"size": "4266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "exercises/food-chain/food_chain_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555991"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
} |
import pytz
import json
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse, get_user_auth
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.exceptions import NodeStateError
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import NodeSerializer, NodeProviderSerializer
from api.nodes.serializers import NodeLinksSerializer, NodeLicenseSerializer
from api.nodes.serializers import NodeContributorsSerializer
from api.base.serializers import (IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileCommentRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError
class BaseRegistrationSerializer(NodeSerializer):
    """Common serializer for registrations.

    Most node attributes are re-declared read-only, and the majority of
    fields are wrapped in HideIfWithdrawal so they are suppressed once a
    registration has been withdrawn.
    """
    title = ser.CharField(read_only=True)
    description = ser.CharField(read_only=True)
    category_choices = NodeSerializer.category_choices
    category_choices_string = NodeSerializer.category_choices_string
    category = HideIfWithdrawal(ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string))
    date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
    fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
    collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
    access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
    node_license = HideIfWithdrawal(NodeLicenseSerializer(read_only=True))
    tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
    # `public` is the only plain attribute that is writable; see update().
    public = HideIfWithdrawal(ser.BooleanField(source='is_public', required=False,
                                               help_text='Nodes that are made public will give read-only access '
                                                         'to everyone. Private nodes require explicit read '
                                                         'permission. Write and admin access are the same for '
                                                         'public and private nodes. Administrators on a parent '
                                                         'node have implicit read permissions for all child nodes'))
    current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(help_text='List of strings representing the permissions '
                                                                                    'for the current user on this node.'))
    # ---- approval / embargo / withdrawal state flags ----
    pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_pending_embargo',
                                                                 help_text='The associated Embargo is awaiting approval by project admins.'))
    pending_registration_approval = HideIfWithdrawal(ser.BooleanField(source='is_pending_registration', read_only=True,
                                                                      help_text='The associated RegistrationApproval is awaiting approval by project admins.'))
    pending_withdrawal = HideIfWithdrawal(ser.BooleanField(source='is_pending_retraction', read_only=True,
                                                           help_text='The registration is awaiting withdrawal approval by project admins.'))
    withdrawn = ser.BooleanField(source='is_retracted', read_only=True,
                                 help_text='The registration has been withdrawn.')
    date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
    date_withdrawn = VersionedDateTimeField(source='retraction.date_retracted', read_only=True, help_text='Date time of when this registration was retracted.')
    embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
    withdrawal_justification = ser.CharField(source='retraction.justification', read_only=True)
    template_from = HideIfWithdrawal(ser.CharField(read_only=True, allow_blank=False, allow_null=False,
                                                   help_text='Specify a node id for a node you would like to use as a template for the '
                                                             'new node. Templating is like forking, except that you do not copy the '
                                                             'files, only the project structure. Some information is changed on the top '
                                                             'level project by submitting the appropriate fields in the request body, '
                                                             'and some information will not change. By default, the description will '
                                                             'be cleared and the project will be made private.'))
    registration_supplement = ser.SerializerMethodField()
    registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
        help_text='A dictionary with supplemental registration questions and responses.'))
    # ---- relationships to related API resources ----
    registered_by = HideIfWithdrawal(RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<registered_user._id>'}
    ))
    registered_from = HideIfWithdrawal(RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<registered_from._id>'}
    ))
    children = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-children',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_node_count'},
    ))
    comments = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-comments',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'unread': 'get_unread_comments_count'},
        filter={'target': '<_id>'}
    ))
    contributors = RelationshipField(
        related_view='registrations:registration-contributors',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_contrib_count'}
    )
    implicit_contributors = RelationshipField(
        related_view='registrations:registration-implicit-contributors',
        related_view_kwargs={'node_id': '<_id>'},
        help_text='This feature is experimental and being tested. It may be deprecated.'
    )
    files = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-providers',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    wikis = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-wikis',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    # The forked-from target may itself be a registration or a plain node,
    # so the related view is chosen lazily per object.
    forked_from = HideIfWithdrawal(RelationshipField(
        related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
        related_view_kwargs={'node_id': '<forked_from_id>'}
    ))
    template_node = HideIfWithdrawal(RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<template_node._id>'}
    ))
    license = HideIfWithdrawal(RelationshipField(
        related_view='licenses:license-detail',
        related_view_kwargs={'license_id': '<node_license.node_license._id>'},
    ))
    logs = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-logs',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    forks = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-forks',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    # Kept only for API version 2.0 clients; superseded by linked_nodes.
    node_links = ShowIfVersion(HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-pointers',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_pointers_count'},
        help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.'
    )), min_version='2.0', max_version='2.0')
    parent = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<parent_node._id>'},
        filter_key='parent_node'
    ))
    root = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<root._id>'}
    ))
    affiliated_institutions = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-institutions',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    registration_schema = RelationshipField(
        related_view='metaschemas:metaschema-detail',
        related_view_kwargs={'metaschema_id': '<registered_schema_id>'}
    )
    registrations = HideIfRegistration(RelationshipField(
        related_view='nodes:node-registrations',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    draft_registrations = HideIfRegistration(RelationshipField(
        related_view='nodes:node-draft-registrations',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
        related_view='nodes:node-preprints',
        related_view_kwargs={'node_id': '<_id>'}
    )))
    identifiers = HideIfWithdrawal(RelationshipField(
        related_view='registrations:identifier-list',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    linked_nodes = HideIfWithdrawal(RelationshipField(
        related_view='registrations:linked-nodes',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_node_links_count'},
        self_view='registrations:node-pointer-relationship',
        self_view_kwargs={'node_id': '<_id>'}
    ))
    linked_registrations = HideIfWithdrawal(RelationshipField(
        related_view='registrations:linked-registrations',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_registration_links_count'},
        self_view='registrations:node-registration-pointer-relationship',
        self_view_kwargs={'node_id': '<_id>'}
    ))
    view_only_links = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-view-only-links',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_view_only_links_count'},
    ))
    citation = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-citation',
        related_view_kwargs={'node_id': '<_id>'}
    ))
    links = LinksField({'self': 'get_registration_url', 'html': 'get_absolute_html_url'})
    def get_registration_url(self, obj):
        """Return the API detail URL for this registration."""
        return absolute_reverse('registrations:registration-detail', kwargs={
            'node_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
    def get_absolute_url(self, obj):
        """Canonical API URL; same as the `self` link."""
        return self.get_registration_url(obj)
    def create(self, validated_data):
        """Register a draft registration.

        Pops the write-only inputs (draft, registration_choice,
        lift_embargo), validates the draft metadata, registers it, and
        either embargoes the result or requires approval, depending on
        registration_choice.
        """
        auth = get_user_auth(self.context['request'])
        draft = validated_data.pop('draft')
        registration_choice = validated_data.pop('registration_choice', 'immediate')
        embargo_lifted = validated_data.pop('lift_embargo', None)
        reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
        try:
            draft.validate_metadata(metadata=draft.registration_metadata, reviewer=reviewer, required_fields=True)
        except ValidationValueError as e:
            raise exceptions.ValidationError(e.message)
        registration = draft.register(auth, save=True)
        if registration_choice == 'embargo':
            # An embargoed registration needs an explicit lift date.
            if not embargo_lifted:
                raise exceptions.ValidationError('lift_embargo must be specified.')
            embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
            try:
                registration.embargo_registration(auth.user, embargo_end_date)
            except ValidationError as err:
                raise exceptions.ValidationError(err.message)
        else:
            try:
                registration.require_approval(auth.user)
            except NodeStateError as err:
                raise exceptions.ValidationError(err)
        registration.save()
        return registration
    def get_registered_meta(self, obj):
        """Return the first registered_meta value, JSON-decoded when possible."""
        if obj.registered_meta:
            # NOTE(review): takes the first entry only — presumably there is
            # exactly one registered schema per registration; confirm.
            meta_values = obj.registered_meta.values()[0]
            try:
                return json.loads(meta_values)
            except TypeError:
                # Already a decoded object rather than a JSON string.
                return meta_values
            except ValueError:
                # Not valid JSON; return the raw value as-is.
                return meta_values
        return None
    def get_embargo_end_date(self, obj):
        """Return the embargo lift date, or None when not embargoed."""
        if obj.embargo_end_date:
            return obj.embargo_end_date
        return None
    def get_registration_supplement(self, obj):
        """Return the name of the first registered schema, if any."""
        if obj.registered_schema:
            schema = obj.registered_schema.first()
            if schema is None:
                return None
            return schema.name
        return None
    def get_current_user_permissions(self, obj):
        # Delegates to the node implementation so permissions stay consistent.
        return NodeSerializer.get_current_user_permissions(self, obj)
    def update(self, registration, validated_data):
        """Apply the only supported edits: tag changes and private -> public."""
        auth = Auth(self.context['request'].user)
        # Update tags
        if 'tags' in validated_data:
            new_tags = validated_data.pop('tags', [])
            try:
                registration.update_tags(new_tags, auth=auth)
            except NodeStateError as err:
                raise Conflict(err.message)
        is_public = validated_data.get('is_public', None)
        if is_public is not None:
            if is_public:
                try:
                    registration.update(validated_data, auth=auth)
                except NodeUpdateError as err:
                    raise exceptions.ValidationError(err.reason)
                except NodeStateError as err:
                    raise exceptions.ValidationError(err.message)
            else:
                # Making a public registration private again is not allowed.
                raise exceptions.ValidationError('Registrations can only be turned from private to public.')
        return registration
    class Meta:
        # JSON-API resource type for all registration serializers.
        type_ = 'registrations'
class RegistrationSerializer(BaseRegistrationSerializer):
    """
    Overrides BaseRegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields
    """
    # Write-only inputs that BaseRegistrationSerializer.create() pops from
    # validated_data; they never appear in serialized output.
    draft_registration = ser.CharField(write_only=True)
    registration_choice = ser.ChoiceField(write_only=True, choices=['immediate', 'embargo'])
    lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
class RegistrationDetailSerializer(BaseRegistrationSerializer):
    """
    Overrides BaseRegistrationSerializer to make id required.
    """
    # Detail endpoints must identify the target registration, so the id
    # (backed by the model's _id) is mandatory here.
    id = IDField(source='_id', required=True)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
    """Node-link serializer whose detail URL is routed through the registrations namespace."""

    def get_absolute_url(self, obj):
        """Return the canonical API URL for this registration node link."""
        view_kwargs = self.context['request'].parser_context['kwargs']
        return absolute_reverse(
            'registrations:registration-pointer-detail',
            kwargs={
                'node_link_id': obj._id,
                'node_id': view_kwargs['node_id'],
                'version': view_kwargs['version'],
            },
        )
class RegistrationContributorsSerializer(NodeContributorsSerializer):
    """Contributor serializer whose detail URL is routed through the registrations namespace."""

    def get_absolute_url(self, obj):
        """Return the canonical API URL for this registration contributor."""
        view_kwargs = self.context['request'].parser_context['kwargs']
        return absolute_reverse(
            'registrations:registration-contributor-detail',
            kwargs={
                'user_id': obj.user._id,
                'node_id': view_kwargs['node_id'],
                'version': view_kwargs['version'],
            },
        )
class RegistrationFileSerializer(OsfStorageFileSerializer):
    """File serializer whose relationship links target registration routes."""

    # Folder contents, resolved against the registration's files endpoint.
    files = NodeFileHyperLinkField(
        related_view='registrations:registration-files',
        related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder'
    )

    # Comments targeting this file; the unread count is exposed as related
    # meta and the relationship is filtered by the file's guid.
    comments = FileCommentRelationshipField(related_view='registrations:registration-comments',
                                            related_view_kwargs={'node_id': '<node._id>'},
                                            related_meta={'unread': 'get_unread_comments_count'},
                                            filter={'target': 'get_file_guid'}
                                            )

    node = RelationshipField(related_view='registrations:registration-detail',
                             related_view_kwargs={'node_id': '<node._id>'},
                             help_text='The registration that this file belongs to'
                             )
class RegistrationProviderSerializer(NodeProviderSerializer):
    """
    Overrides NodeProviderSerializer to lead to correct registration file links
    """
    # Root folder relationship; never_embed=True keeps this link-only
    # (the relationship is not expanded inline in responses).
    files = NodeFileHyperLinkField(
        related_view='registrations:registration-files',
        related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder',
        never_embed=True
    )
| {
"content_hash": "17653f3b80980cdbc9a15ec0971127fb",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 159,
"avg_line_length": 44.86910994764398,
"alnum_prop": 0.6450408401400234,
"repo_name": "binoculars/osf.io",
"id": "bee631d69d8da98f96aaf6f44f570e9ebb3b4978",
"size": "17140",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/registrations/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "106867"
},
{
"name": "HTML",
"bytes": "236223"
},
{
"name": "JavaScript",
"bytes": "1831128"
},
{
"name": "Mako",
"bytes": "666783"
},
{
"name": "Python",
"bytes": "7866290"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from parsl.config import Config
from parsl.executors import FluxExecutor
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
# Parsl configuration: a Flux executor backed by a single one-node Slurm
# block. Placeholder partition/account values must be filled in per site.
config = Config(
    executors=[
        FluxExecutor(
            provider=SlurmProvider(
                partition="YOUR_PARTITION",  # e.g. "pbatch", "pdebug"
                account="YOUR_ACCOUNT",
                # NOTE(review): --mpibind=off is passed through to srun —
                # presumably to keep mpibind from interfering with Flux's
                # own binding; confirm against site documentation.
                launcher=SrunLauncher(overrides="--mpibind=off"),
                # Exactly one block of one node, held for the full walltime.
                nodes_per_block=1,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                walltime="00:30:00",
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH -t 50'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                # Seconds to wait for scheduler commands before giving up.
                cmd_timeout=120,
            ),
        )
    ]
)
| {
"content_hash": "c3f71b13552c7803945b0f2a1e45d082",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 34.41379310344828,
"alnum_prop": 0.5340681362725451,
"repo_name": "Parsl/parsl",
"id": "1ced615f4d6267218e727354614392d9cf8bfd1e",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/configs/toss3_llnl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
} |
def curry(_curried_func, *args, **kwargs):
    """Pre-bind positional and keyword arguments to a callable.

    Later keyword arguments supplied at call time override the pre-bound
    ones; positional arguments are appended after the pre-bound ones.
    """
    def _curried(*moreargs, **morekwargs):
        merged_kwargs = dict(kwargs, **morekwargs)
        return _curried_func(*(args + moreargs), **merged_kwargs)
    return _curried
### Begin from Python 2.5 functools.py ########################################
# Summary of changes made to the Python 2.5 code below:
# * swapped ``partial`` for ``curry`` to maintain backwards-compatibility
# in Django.
# * Wrapped the ``setattr`` call in ``update_wrapper`` with a try-except
# block to make it compatible with Python 2.3, which doesn't allow
# assigning to ``__name__``.
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation.
# All Rights Reserved.
###############################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
# Attribute names copied verbatim from the wrapped function onto the wrapper.
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
# Wrapper attributes that are merged (dict.update) with the wrapped
# function's corresponding attribute.
WRAPPER_UPDATES = ('__dict__',)

def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Make *wrapper* look like *wrapped* for introspection purposes.

    ``assigned`` names attributes copied directly from the wrapped function
    (defaults to WRAPPER_ASSIGNMENTS); ``updated`` names wrapper attributes
    merged with the wrapped function's values (defaults to WRAPPER_UPDATES).
    Returns *wrapper*, so this is usable as a decorator via curry().
    """
    for attr_name in assigned:
        try:
            setattr(wrapper, attr_name, getattr(wrapped, attr_name))
        except TypeError:
            # Python 2.3 does not allow assigning to __name__; skip quietly.
            pass
    for attr_name in updated:
        getattr(wrapper, attr_name).update(getattr(wrapped, attr_name))
    return wrapper
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Return a decorator that applies update_wrapper() to its target.

    The decorated function becomes the ``wrapper`` argument of
    update_wrapper(); ``assigned`` and ``updated`` are forwarded unchanged.
    Defaults match update_wrapper()'s.  Convenience shortcut built on curry().
    """
    return curry(
        update_wrapper,
        wrapped=wrapped,
        assigned=assigned,
        updated=updated,
    )
### End from Python 2.5 functools.py ##########################################
def memoize(func, cache, num_args):
    """
    Wrap *func* so results are stored in the caller-supplied ``cache`` dict.

    The cache key is the first ``num_args`` positional arguments, so they
    must be hashable. Keyword arguments are not supported.
    """
    def wrapper(*args):
        key = args[:num_args]
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    return wraps(func)(wrapper)
class Promise(object):
    """
    Marker base class for the proxy type created inside lazy().

    Code can use ``isinstance(obj, Promise)`` to recognize deferred
    (lazily evaluated) values.
    """
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    # When lazy() is called by the __reduce_ex__ machinery to reconstitute the
    # __proxy__ class it can't call with *args, so the first item will just be
    # a tuple.
    if len(resultclasses) == 1 and isinstance(resultclasses[0], tuple):
        resultclasses = resultclasses[0]

    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Class-level dispatch table: {resultclass: {method_name: method}}.
        # None means "not built yet"; filled in by __prepare_class__ on the
        # first instantiation.
        __dispatch = None

        def __init__(self, args, kw):
            # Store the call to replay later; name-mangled to avoid clashing
            # with attributes copied from the result classes.
            self.__func = func
            self.__args = args
            self.__kw = kw
            if self.__dispatch is None:
                self.__prepare_class__()

        def __reduce_ex__(self, protocol):
            # Pickle as a call to lazy() with the original function and result
            # classes so unpickling rebuilds an equivalent proxy.
            return (lazy, (self.__func, resultclasses), self.__dict__)

        def __prepare_class__(cls):
            # Copy each result class's methods onto the proxy (unless already
            # present) so method access transparently evaluates the function.
            cls.__dispatch = {}
            for resultclass in resultclasses:
                cls.__dispatch[resultclass] = {}
                for (k, v) in resultclass.__dict__.items():
                    # All __promise__ return the same wrapper method, but they
                    # also do setup, inserting the method into the dispatch
                    # dict.
                    meth = cls.__promise__(resultclass, k, v)
                    if hasattr(cls, k):
                        continue
                    setattr(cls, k, meth)
            cls._delegate_str = str in resultclasses
            cls._delegate_unicode = unicode in resultclasses
            # str and unicode need different cast dunders; both at once would
            # make the choice ambiguous.
            assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
            if cls._delegate_unicode:
                cls.__unicode__ = cls.__unicode_cast
            elif cls._delegate_str:
                cls.__str__ = cls.__str_cast
        # Pre-2.4-decorator style classmethod registration.
        __prepare_class__ = classmethod(__prepare_class__)

        def __promise__(cls, klass, funcname, func):
            # Builds a wrapper around some magic method and registers that magic
            # method for the given type and method name.
            # NOTE: the ``func`` parameter deliberately shadows the outer
            # ``func`` argument of lazy(); here it is the result class's
            # own method, stored in the dispatch table below.
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = self.__func(*self.__args, **self.__kw)
                for t in type(res).mro():
                    if t in self.__dispatch:
                        return self.__dispatch[t][funcname](res, *args, **kw)
                raise TypeError("Lazy object returned unexpected type.")

            if klass not in cls.__dispatch:
                cls.__dispatch[klass] = {}
            cls.__dispatch[klass][funcname] = func
            return __wrapper__
        __promise__ = classmethod(__promise__)

        def __unicode_cast(self):
            # unicode results are returned as-is (func must produce unicode).
            return self.__func(*self.__args, **self.__kw)

        def __str_cast(self):
            return str(self.__func(*self.__args, **self.__kw))

        def __cmp__(self, rhs):
            # Evaluate, coerce to the delegated text type if any, and compare.
            if self._delegate_str:
                s = str(self.__func(*self.__args, **self.__kw))
            elif self._delegate_unicode:
                s = unicode(self.__func(*self.__args, **self.__kw))
            else:
                s = self.__func(*self.__args, **self.__kw)
            if isinstance(rhs, Promise):
                # Compare via the other promise's __cmp__, with the sign
                # flipped to keep the ordering consistent.
                return -cmp(rhs, s)
            else:
                return cmp(s, rhs)

        def __mod__(self, rhs):
            # Support "lazy_string % args" for the delegated text type only.
            if self._delegate_str:
                return str(self) % rhs
            elif self._delegate_unicode:
                return unicode(self) % rhs
            else:
                raise AssertionError('__mod__ not supported for non-string types')

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)

    return wraps(func)(__wrapper__)
def allow_lazy(func, *resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    def wrapper(*args, **kwargs):
        for candidate in list(args) + kwargs.values():
            if isinstance(candidate, Promise):
                # At least one lazy argument: defer the whole call.
                return lazy(func, *resultclasses)(*args, **kwargs)
        return func(*args, **kwargs)
    return wraps(func)(wrapper)
class LazyObject(object):
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """
    def __init__(self):
        # None is the sentinel for "not initialised yet"; _setup() replaces
        # it with the real wrapped object on first access.
        self._wrapped = None

    def __getattr__(self, name):
        # Any attribute read forces initialisation, then delegates.
        if self._wrapped is None:
            self._setup()
        return getattr(self._wrapped, name)

    def __setattr__(self, name, value):
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
        else:
            # All other writes go to the wrapped object, initialising it
            # first if needed.
            if self._wrapped is None:
                self._setup()
            setattr(self._wrapped, name, value)

    def __delattr__(self, name):
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is None:
            self._setup()
        delattr(self._wrapped, name)

    def _setup(self):
        """
        Must be implemented by subclasses to initialise the wrapped object.
        """
        raise NotImplementedError

    # introspection support:
    __members__ = property(lambda self: self.__dir__())

    def __dir__(self):
        # dir() reflects the wrapped object, forcing initialisation.
        if self._wrapped is None:
            self._setup()
        return dir(self._wrapped)
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialised from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """
    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Written via __dict__ so LazyObject.__setattr__ does not try to
        # proxy this assignment onto the (not yet created) wrapped object.
        self.__dict__['_setupfunc'] = func
        # For some reason, we have to inline LazyObject.__init__ here to avoid
        # recursion
        self._wrapped = None

    def __str__(self):
        if self._wrapped is None: self._setup()
        return str(self._wrapped)

    def __unicode__(self):
        if self._wrapped is None: self._setup()
        return unicode(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is None:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        else:
            # Changed to use deepcopy from copycompat, instead of copy
            # For Python 2.4.
            from django.utils.copycompat import deepcopy
            return deepcopy(self._wrapped, memo)

    # Need to pretend to be the wrapped class, for the sake of objects that care
    # about this (especially in equality tests)
    def __get_class(self):
        if self._wrapped is None: self._setup()
        return self._wrapped.__class__
    __class__ = property(__get_class)

    def __eq__(self, other):
        # Equality is delegated to the wrapped object.
        if self._wrapped is None: self._setup()
        return self._wrapped == other

    def __hash__(self):
        if self._wrapped is None: self._setup()
        return hash(self._wrapped)

    def _setup(self):
        # Materialise the wrapped object on first use.
        self._wrapped = self._setupfunc()
| {
"content_hash": "6119058f3557c45f566a79d172beba76",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 130,
"avg_line_length": 37.690625,
"alnum_prop": 0.581295083326424,
"repo_name": "t11e/django",
"id": "b66fdd3012d0f12ddc039b9569315b5784d0b66e",
"size": "14616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/utils/functional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "164056"
},
{
"name": "Python",
"bytes": "5708871"
},
{
"name": "Shell",
"bytes": "3459"
}
],
"symlink_target": ""
} |
import six.moves as sm
from gem import vector
def flip(plane):
    ''' Return the plane with every component negated.

    ``plane`` is [a, b, c, d, normal]; each entry (including the normal
    vector) is negated and returned as a new list.
    '''
    coeff_a, coeff_b, coeff_c, coeff_d, normal = (
        plane[0], plane[1], plane[2], plane[3], plane[4])
    return [-coeff_a, -coeff_b, -coeff_c, -coeff_d, -normal]
def normalize(pdata):
    ''' Return the normalized plane (a, b, c, d) as a 4-tuple.

    ``pdata`` is [a, b, c, d]; the (a, b, c) part is normalized as a vector
    and d is rescaled by the same length. On a zero-length normal, prints a
    warning and returns all zeros.
    '''
    vec = vector.Vector(3, data=pdata)
    vecN = vec.normalize()
    # NOTE(review): this measures the magnitude of the already-normalized
    # vector (1 for any non-zero input) — it looks like vec.magnitude() may
    # have been intended; confirm before changing scaling behavior.
    length = vecN.magnitude()
    # BUG FIX: the original guard was `length is not 0`, an identity
    # comparison that is never true for floats, so the zero-division branch
    # could never trigger. Compare by value instead.
    if length != 0:
        return vecN.vector[0], vecN.vector[1], vecN.vector[2], pdata[3] / length
    else:
        print("Plane fail to normalize due to zero division.")
        return 0.0, 0.0, 0.0, 0.0
class Plane(object):
    """Plane of the form a*x + b*y + c*z = d with an associated unit normal."""

    def __init__(self):
        ''' Plane class constructor. '''
        self.normal = vector.Vector(3, data=[0.0, 0.0, 0.0])
        self.a = 0
        self.b = 0
        self.c = 0
        self.d = 0

    def clone(self):
        '''Create a new Plane with the same coefficients and normal.'''
        nPlane = Plane()
        nPlane.normal = self.normal.clone()
        nPlane.a = self.a
        nPlane.b = self.b
        nPlane.c = self.c
        nPlane.d = self.d
        return nPlane

    def fromCoeffs(self, a, b, c, d):
        ''' Set the plane from scalar coefficients A,B,C,D. '''
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        # BUG FIX: the normal of the plane a*x + b*y + c*z = d is the vector
        # (a, b, c). The previous code applied the three-point cross-product
        # formula (copied from fromPoints) to the scalar coefficients, which
        # is not meaningful for scalars.
        self.normal = vector.Vector(3, data=[a, b, c]).normalize()

    def fromPoints(self, a, b, c):
        '''Calculate the plane from three points A,B,C (vectors).'''
        self.a = a
        self.b = b
        self.c = c
        self.normal = vector.cross(b - a, c - a).normalize()
        self.d = self.normal.dot(self.a)

    def i_flip(self):
        ''' Flip the plane in its place. '''
        data = flip([self.a, self.b, self.c, self.d, self.normal])
        self.a = data[0]
        self.b = data[1]
        self.c = data[2]
        self.d = data[3]
        self.normal = data[4]
        return self

    def flip(self):
        ''' Return a flipped copy of the plane. '''
        nPlane = Plane()
        data = flip([self.a, self.b, self.c, self.d, self.normal])
        nPlane.a = data[0]
        nPlane.b = data[1]
        nPlane.c = data[2]
        nPlane.d = data[3]
        nPlane.normal = data[4]
        return nPlane

    def dot(self, vec):
        ''' Return the dot product between the plane and a 4D vector. '''
        return self.a * vec.vector[0] + self.b * vec.vector[1] + self.c * vec.vector[2] + self.d * vec.vector[3]

    def i_normalize(self):
        ''' Normalize the plane in place. '''
        pdata = [self.a, self.b, self.c, self.d]
        self.a, self.b, self.c, self.d = normalize(pdata)
        return self

    def normalize(self):
        ''' Return a normalized copy of the plane. '''
        # The original built Plane().clone() — a throwaway instance cloned
        # immediately; a fresh Plane() is equivalent and cheaper.
        nPlane = Plane()
        pdata = [self.a, self.b, self.c, self.d]
        nPlane.a, nPlane.b, nPlane.c, nPlane.d = normalize(pdata)
        return nPlane

    def bestFitNormal(self, vecList):
        ''' Compute the best-fit normal of a vertex loop (Newell's method). '''
        output = vector.Vector(3).zero()
        count = len(vecList)
        for i in sm.range(count):
            current = vecList[i]
            # BUG FIX: the original indexed vecList[i + 1], which raises
            # IndexError on the final iteration; Newell's method closes the
            # loop by wrapping back to the first vertex.
            nxt = vecList[(i + 1) % count]
            output.vector[0] += (current.vector[2] + nxt.vector[2]) * (current.vector[1] - nxt.vector[1])
            output.vector[1] += (current.vector[0] + nxt.vector[0]) * (current.vector[2] - nxt.vector[2])
            output.vector[2] += (current.vector[1] + nxt.vector[1]) * (current.vector[0] - nxt.vector[0])
        return output.normalize()

    def bestFitD(self, vecList, bestFitNormal):
        ''' Returns the best fit D from a list of vectors using the best fit normal. '''
        val = 0.0
        for vec in vecList:
            val += vec.dot(bestFitNormal)
        return val / len(vecList)

    def point_location(self, plane, point):
        ''' Returns the location of the point. Point is a tuple. '''
        # If s > 0 then the point is on the same side as the normal. (front)
        # If s < 0 then the point is on the opposide side of the normal. (back)
        # If s = 0 then the point lies on the plane.
        s = plane.a * point[0] + plane.b * point[1] + plane.c * point[2] + plane.d
        if s > 0:
            return 1
        elif s < 0:
            return -1
        elif s == 0:
            return 0
        else:
            # Only reachable when s is NaN; falls through returning None.
            print("Not a clue where the point is.")
| {
"content_hash": "331159b204442e168b945bea45a228dc",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 133,
"avg_line_length": 33.08461538461538,
"alnum_prop": 0.5370843989769821,
"repo_name": "explosiveduck/pyGameMath",
"id": "2c78a39b60ddcabdfc5533e6f85d8e7117a215a0",
"size": "4301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gem/plane.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "81367"
}
],
"symlink_target": ""
} |
"""
The ios lacp fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.ios.argspec.lacp.lacp import LacpArgs
class LacpFacts(object):
    """ The ios lacp fact class
    """

    def __init__(self, module, subspec='config', options='options'):
        """Build the generated spec used to shape collected lacp facts."""
        self._module = module
        self.argument_spec = LacpArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        # Narrow the spec to the requested subsection, when one is given.
        if not subspec:
            facts_argument_spec = spec
        elif options:
            facts_argument_spec = spec[subspec][options]
        else:
            facts_argument_spec = spec[subspec]
        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lacp

        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            data = connection.get('show lacp sys-id')

        obj = {}
        if data:
            rendered = self.render_config(self.generated_spec, data)
            if rendered:
                obj = rendered

        # Replace any stale lacp entry before inserting the fresh facts.
        ansible_facts['ansible_network_resources'].pop('lacp', None)
        params = utils.validate_config(self.argument_spec, {'config': obj})
        facts = {'lacp': utils.remove_empties(params['config'])}
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        # The system priority is parsed from the first comma-separated field
        # of the 'show lacp sys-id' output.
        config['system']['priority'] = int(conf.split(',')[0])
        return utils.remove_empties(config)
| {
"content_hash": "b8a7ed5c83e01e0cd65fab8ba5ad7479",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 75,
"avg_line_length": 30.53846153846154,
"alnum_prop": 0.6192275398824517,
"repo_name": "thaim/ansible",
"id": "455460fc94100224c3df7dd4b2c05eb1b18e9a70",
"size": "2528",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/module_utils/network/ios/facts/lacp/lacp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""This example illustrates how to create an account.
Note by default this account will only be accessible via parent MCC.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreateAccountService.mutate
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
from datetime import datetime
from googleads import adwords
def main(client):
    """Create a single test account under the authenticated MCC.

    ``client`` is an AdWordsClient; the new account's name embeds the
    creation timestamp so repeated runs produce distinct names.
    """
    service = client.GetService(
        'ManagedCustomerService', version='v201506')

    timestamp = datetime.today().strftime('%Y%m%d %H:%M:%S')

    # A single ADD operation. Several accounts could be created at once by
    # appending more operations to this list.
    operations = [{
        'operator': 'ADD',
        'operand': {
            'name': 'Account created with ManagedCustomerService on %s' % timestamp,
            'currencyCode': 'EUR',
            'dateTimeZone': 'Europe/London',
        }
    }]

    accounts = service.mutate(operations)

    # Report each account that was created.
    for account in accounts['value']:
        print ('Account with customer ID \'%s\' was successfully created.'
           % account['customerId'])
if __name__ == '__main__':
    # Initialize client object.
    # Credentials/properties are read from the googleads.yaml file (by
    # default looked up in the user's home directory).
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client)
| {
"content_hash": "0d3481801aa8328d302986b2d59aea73",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 30.274509803921568,
"alnum_prop": 0.7007772020725389,
"repo_name": "coxmediagroup/googleads-python-lib",
"id": "273f311b3b1e8e7b95cd951987a48de813341f0f",
"size": "2162",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adwords/v201506/account_management/create_account.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535137"
}
],
"symlink_target": ""
} |
import rospy
from rosgraph_msgs.msg import Log
from std_msgs.msg import String
from ros_wild import Subscriber
def callback_log(msg):
    """Print the text payload of a /rosout or /rosout_agg Log message."""
    print(msg.msg)
def callback_string(msg):
    """Print the data field of a std_msgs/String message."""
    print(msg.data)
def main():
    """Start the example node and subscribe to every topic matching r"/.+"."""
    rospy.init_node("example_node")
    subscriber = Subscriber(r"/.+")
    subscriber.register_callback(Log, callback_log)
    subscriber.register_callback(String, callback_string)
    # Block until the node is shut down.
    rospy.spin()
# Run the example node when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "e40b3733a05eeb4490382f0b8a38158f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 20.346153846153847,
"alnum_prop": 0.6597353497164461,
"repo_name": "yuma-m/ros_wild",
"id": "3cabb29110f1715f697ffc8a5a2fa47b3cf7405f",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/subscribe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "431"
},
{
"name": "Python",
"bytes": "8060"
}
],
"symlink_target": ""
} |
"""
Tests that data is retained after an import/export or export/import cycle.
"""
from __future__ import unicode_literals
from __future__ import print_function
from lxml import etree
import os
from shutil import rmtree, copytree
from subprocess import check_call
from tempfile import mkdtemp
from unittest import TestCase
from xbundle import XBundle
from tests.util import clean_xml, file_from_string
from tests.data import expected as expected_data, input as input_data
def _normalize_xml(dirname):
    """Removes whitespace from xml files in the given directory."""
    for dirpath, _, filenames in os.walk(dirname):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            if not path.endswith('.xml'):
                continue
            # Read, clean, and rewrite the file in place.
            with open(path) as handle:
                cleaned = clean_xml(handle.read())
            with open(path, 'w') as handle:
                handle.write(cleaned)
class TestImportExport(TestCase):
    """
    Test that data is retained after an import/export or export/import cycle.
    """

    def test_export_import(self):
        """
        Test export then import.
        """
        # Build a bundle in memory from fixture XML, export it to disk, then
        # re-import and compare the serialized forms.
        bundle = XBundle()
        cxmls = input_data.COURSE
        pxmls = input_data.POLICIES

        bundle.set_course(etree.XML(cxmls))
        bundle.add_policies(etree.XML(pxmls))
        bundle.add_about_file("overview.html", "hello overview")

        xbin = str(bundle)
        tdir = mkdtemp()
        try:
            bundle.export_to_directory(tdir)

            # Test round- trip.
            xb2 = XBundle()
            xb2.import_from_directory(os.path.join(tdir, 'mitx.01'))

            xbreloaded = str(xb2)

            self.assertEqual(clean_xml(xbin), clean_xml(xbreloaded))
        finally:
            rmtree(tdir)

    def test_import_export(self):  # pylint: disable=no-self-use
        """
        Test import then export.
        """
        bundle = XBundle()
        bundle.import_from_directory(os.path.join("input_testdata", "mitx.01"))

        tdir = mkdtemp()
        try:
            bundle.export_to_directory(tdir)
            knownDir = os.path.join("input_testdata", "mitx.01.exported")
            knownTempDir = os.path.join(tdir, 'mitx.01.exported')
            newDir = os.path.join(tdir, "mitx.01")

            # Transform xml files to remove spaces. This allows for cross tests
            # to pass across platforms with slightly different xml serializers
            # (see: travis). We copy the files for easy cleanup.
            copytree(knownDir, knownTempDir)
            _normalize_xml(tdir)

            # diff exits non-zero on any difference, failing the test.
            check_call([
                "diff", "-r", knownTempDir, newDir
            ])
        finally:
            rmtree(tdir)

    def test_import_url_name(self):
        """
        Test that we import url_name as url_name_orig.
        """
        bundle = XBundle(keep_urls=True, keep_studio_urls=True)
        bundle.import_from_directory(os.path.join('input_testdata', 'mitx.01'))
        bundle_string = str(bundle)
        expected = expected_data.KEEP_URLS
        self.assertEqual(clean_xml(expected), clean_xml(bundle_string))

    def test_preserve_url_name(self):
        """
        Test that preserve_url_name imports as url_name and not url_name_orig.
        """
        bundle = XBundle(
            keep_urls=True, keep_studio_urls=True, preserve_url_name=True)
        bundle.import_from_directory('input_testdata/mitx.01')
        bundle_string = str(bundle)
        expected = expected_data.PRESERVE_URL_NAME
        self.assertEqual(clean_xml(expected), clean_xml(bundle_string))

    def test_save(self):
        """
        Test save method.
        """
        # Exercises all three save targets: default filename, explicit
        # filename, and an already-open file handle.
        input_xml = "<xbundle><metadata /><course /></xbundle>"
        bundle = XBundle()
        bundle.load(file_from_string(input_xml))
        self.assertEqual(clean_xml(str(bundle)), clean_xml(input_xml))

        curdir = os.getcwd()
        tempdir = mkdtemp()
        try:
            # save() writes relative to the working directory.
            os.chdir(tempdir)
            bundle.save()
            with open(os.path.join(tempdir, "xbundle.xml")) as f:
                self.assertEqual(clean_xml(f.read()), clean_xml(input_xml))
            bundle.save(filename="other.xml")
            with open(os.path.join(tempdir, "other.xml")) as f:
                self.assertEqual(clean_xml(f.read()), clean_xml(input_xml))
            handle_path = os.path.join(tempdir, "third.xml")
            with open(handle_path, "w") as f:
                bundle.save(file_handle=f)
            with open(handle_path) as f:
                self.assertEqual(clean_xml(f.read()), clean_xml(input_xml))
        finally:
            os.chdir(curdir)
            rmtree(tempdir)

    def test_export_and_keep_urls(self):
        """
        Test the changes to url_name after export_to_directory and import.
        """
        # Note url_name_orig in chapter.
        input_xml = input_data.URL_NAME_ORIG_IN_CHAPTER2

        bundle = XBundle(keep_urls=True, force_studio_format=True)
        bundle.load(file_from_string(input_xml))

        # str(bundle) doesn't change input xml, but export_to_directory will.
        self.assertEqual(clean_xml(input_xml), clean_xml(str(bundle)))

        old_current_dir = os.getcwd()
        tempdir = mkdtemp()
        try:
            os.chdir(tempdir)
            bundle.export_to_directory()

            bundle2 = XBundle(keep_urls=True, force_studio_format=True)
            bundle2.import_from_directory()

            expected = expected_data.KEEP_URLS_FORCE_STUDIO_FORMAT
            self.assertEqual(clean_xml(expected), clean_xml(str(bundle2)))
        finally:
            os.chdir(old_current_dir)
            rmtree(tempdir)

    def test_xml_header(self):
        """
        Test removal of xml header. The <?xml ... should not show up in the
        output and the XML should still be parsed correctly.
        """
        input_xml = input_data.EMPTY_XBUNDLE
        bundle = XBundle()
        bundle.load(file_from_string(input_xml))
        self.assertFalse(str(bundle).startswith("<?xml"))
        self.assertEqual(clean_xml(input_xml), clean_xml(str(bundle)))

    def test_import_skip_hidden(self):
        """
        Test skip_hidden flag.
        """
        bundle = XBundle(skip_hidden=True)
        path = os.path.join('input_testdata', 'mitx.01')
        bundle.import_from_directory(path)
        expected = expected_data.SKIP_HIDDEN
        self.assertEqual(clean_xml(str(bundle)), clean_xml(expected))

    def test_import_large(self):
        """
        Test import of a course slightly larger than mitx.01.
        """
        bundle = XBundle()
        path = os.path.join('input_testdata', 'content-devops-0001')
        bundle.import_from_directory(path)
        expected_path = os.path.join(
            'input_testdata', 'content-devops-0001.out.xml')
        with open(expected_path) as f:
            self.assertEqual(clean_xml(f.read()), clean_xml(str(bundle)))

        tempdir = mkdtemp()
        try:
            bundle.export_to_directory(tempdir, xml_only=True, newfmt=True)
            for _, _, files in os.walk(os.path.join(tempdir, "0.001")):
                for filename in files:
                    # We set xml_only=True so there shouldn't be anything else.
                    self.assertTrue(filename.endswith(".xml"))
        finally:
            rmtree(tempdir)
| {
"content_hash": "48b5b60a238afe24100e8be49122a2bd",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 32.69911504424779,
"alnum_prop": 0.5853856562922869,
"repo_name": "mitsei/xbundle",
"id": "ab09f834298647f092b6e4296ede35f1e79935c1",
"size": "7390",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_import_export.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3718"
},
{
"name": "JavaScript",
"bytes": "89619"
},
{
"name": "Python",
"bytes": "48580"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
    from thrift.protocol import fastbinary
except ImportError:
    # BUG FIX: the original bare `except:` swallowed every exception,
    # including KeyboardInterrupt/SystemExit. Only a missing C accelerator
    # module should trigger the pure-Python fallback.
    fastbinary = None
class Iface:
  """Abstract client-side interface for the Nimbus Thrift service.

  Every method here is a no-op stub (returns None); concrete
  implementations such as the generated Client below supply the actual
  RPC marshalling over a Thrift protocol.
  """
  def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
    """Parameters: name, uploadedJarLocation, jsonConf, topology"""
    pass
  def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
    """Parameters: name, uploadedJarLocation, jsonConf, topology, options"""
    pass
  def killTopology(self, name):
    """Parameters: name"""
    pass
  def killTopologyWithOpts(self, name, options):
    """Parameters: name, options"""
    pass
  def activate(self, name):
    """Parameters: name"""
    pass
  def deactivate(self, name):
    """Parameters: name"""
    pass
  def rebalance(self, name, options):
    """Parameters: name, options"""
    pass
  def setLogConfig(self, name, config):
    """Parameters: name, config"""
    pass
  def getLogConfig(self, name):
    """Parameters: name"""
    pass
  def debug(self, name, component, enable, samplingPercentage):
    """Enable/disable logging of generated tuples via the internal EventLogger bolt.

    A null or empty component applies the debug flag to the entire
    topology; samplingPercentage limits logging to a percentage of the
    generated tuples.
    Parameters: name, component, enable, samplingPercentage
    """
    pass
  def setWorkerProfiler(self, id, profileRequest):
    """Parameters: id, profileRequest"""
    pass
  def getComponentPendingProfileActions(self, id, component_id, action):
    """Parameters: id, component_id, action"""
    pass
  def uploadNewCredentials(self, name, creds):
    """Parameters: name, creds"""
    pass
  def beginCreateBlob(self, key, meta):
    """Parameters: key, meta"""
    pass
  def beginUpdateBlob(self, key):
    """Parameters: key"""
    pass
  def uploadBlobChunk(self, session, chunk):
    """Parameters: session, chunk"""
    pass
  def finishBlobUpload(self, session):
    """Parameters: session"""
    pass
  def cancelBlobUpload(self, session):
    """Parameters: session"""
    pass
  def getBlobMeta(self, key):
    """Parameters: key"""
    pass
  def setBlobMeta(self, key, meta):
    """Parameters: key, meta"""
    pass
  def beginBlobDownload(self, key):
    """Parameters: key"""
    pass
  def downloadBlobChunk(self, session):
    """Parameters: session"""
    pass
  def deleteBlob(self, key):
    """Parameters: key"""
    pass
  def listBlobs(self, session):
    """Parameters: session"""
    pass
  def getBlobReplication(self, key):
    """Parameters: key"""
    pass
  def updateBlobReplication(self, key, replication):
    """Parameters: key, replication"""
    pass
  def createStateInZookeeper(self, key):
    """Parameters: key"""
    pass
  def beginFileUpload(self):
    pass
  def uploadChunk(self, location, chunk):
    """Parameters: location, chunk"""
    pass
  def finishFileUpload(self, location):
    """Parameters: location"""
    pass
  def beginFileDownload(self, file):
    """Parameters: file"""
    pass
  def downloadChunk(self, id):
    """Parameters: id"""
    pass
  def getNimbusConf(self):
    pass
  def getClusterInfo(self):
    pass
  def getLeader(self):
    pass
  def isTopologyNameAllowed(self, name):
    """Parameters: name"""
    pass
  def getTopologyInfo(self, id):
    """Parameters: id"""
    pass
  def getTopologyInfoWithOpts(self, id, options):
    """Parameters: id, options"""
    pass
  def getTopologyPageInfo(self, id, window, is_include_sys):
    """Parameters: id, window, is_include_sys"""
    pass
  def getSupervisorPageInfo(self, id, host, is_include_sys):
    """Parameters: id, host, is_include_sys"""
    pass
  def getComponentPageInfo(self, topology_id, component_id, window, is_include_sys):
    """Parameters: topology_id, component_id, window, is_include_sys"""
    pass
  def getTopologyConf(self, id):
    """Parameters: id"""
    pass
  def getTopology(self, id):
    """Return the compiled topology, which contains ackers and metrics
    consumers. Compare {@link #getUserTopology(String id)}.
    Parameters: id
    """
    pass
  def getUserTopology(self, id):
    """Return the user-specified topology as submitted originally.
    Compare {@link #getTopology(String id)}.
    Parameters: id
    """
    pass
  def getTopologyHistory(self, user):
    """Parameters: user"""
    pass
  def getOwnerResourceSummaries(self, owner):
    """Parameters: owner"""
    pass
  def processWorkerMetrics(self, metrics):
    """Parameters: metrics"""
    pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
  def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
    """Submit a topology to Nimbus and block until the reply is read.

    Raises whatever service exception the server returned (see
    recv_submitTopology).
    Parameters:
     - name
     - uploadedJarLocation
     - jsonConf
     - topology
    """
    self.send_submitTopology(name, uploadedJarLocation, jsonConf, topology)
    self.recv_submitTopology()
  def send_submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
    """Serialize the submitTopology CALL frame (envelope + args struct)
    to the output protocol and flush the underlying transport."""
    self._oprot.writeMessageBegin('submitTopology', TMessageType.CALL, self._seqid)
    args = submitTopology_args()
    args.name = name
    args.uploadedJarLocation = uploadedJarLocation
    args.jsonConf = jsonConf
    args.topology = topology
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
  def recv_submitTopology(self):
    """Read the submitTopology reply from the input protocol.

    Raises TApplicationException for transport-level EXCEPTION frames,
    or re-raises the service-declared exceptions (e, ite, aze) carried
    in the result struct. Returns None on success (void method).
    """
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      # Server-side framework failure: decode and propagate it verbatim.
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = submitTopology_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Exactly one of the optional exception fields is set on failure.
    if result.e is not None:
      raise result.e
    if result.ite is not None:
      raise result.ite
    if result.aze is not None:
      raise result.aze
    return
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
self.send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options)
self.recv_submitTopologyWithOpts()
def send_submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
self._oprot.writeMessageBegin('submitTopologyWithOpts', TMessageType.CALL, self._seqid)
args = submitTopologyWithOpts_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopologyWithOpts(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = submitTopologyWithOpts_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
if result.aze is not None:
raise result.aze
return
def killTopology(self, name):
"""
Parameters:
- name
"""
self.send_killTopology(name)
self.recv_killTopology()
def send_killTopology(self, name):
self._oprot.writeMessageBegin('killTopology', TMessageType.CALL, self._seqid)
args = killTopology_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopology(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = killTopology_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
return
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_killTopologyWithOpts(name, options)
self.recv_killTopologyWithOpts()
def send_killTopologyWithOpts(self, name, options):
self._oprot.writeMessageBegin('killTopologyWithOpts', TMessageType.CALL, self._seqid)
args = killTopologyWithOpts_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopologyWithOpts(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = killTopologyWithOpts_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
return
def activate(self, name):
"""
Parameters:
- name
"""
self.send_activate(name)
self.recv_activate()
def send_activate(self, name):
self._oprot.writeMessageBegin('activate', TMessageType.CALL, self._seqid)
args = activate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_activate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = activate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
return
def deactivate(self, name):
"""
Parameters:
- name
"""
self.send_deactivate(name)
self.recv_deactivate()
def send_deactivate(self, name):
self._oprot.writeMessageBegin('deactivate', TMessageType.CALL, self._seqid)
args = deactivate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deactivate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deactivate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
return
def rebalance(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_rebalance(name, options)
self.recv_rebalance()
def send_rebalance(self, name, options):
self._oprot.writeMessageBegin('rebalance', TMessageType.CALL, self._seqid)
args = rebalance_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rebalance(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = rebalance_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
if result.aze is not None:
raise result.aze
return
def setLogConfig(self, name, config):
"""
Parameters:
- name
- config
"""
self.send_setLogConfig(name, config)
self.recv_setLogConfig()
def send_setLogConfig(self, name, config):
self._oprot.writeMessageBegin('setLogConfig', TMessageType.CALL, self._seqid)
args = setLogConfig_args()
args.name = name
args.config = config
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setLogConfig(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = setLogConfig_result()
result.read(iprot)
iprot.readMessageEnd()
return
  def getLogConfig(self, name):
    """Fetch the log configuration for a topology; returns the decoded
    result struct from recv_getLogConfig.
    Parameters:
     - name
    """
    self.send_getLogConfig(name)
    return self.recv_getLogConfig()
  def send_getLogConfig(self, name):
    """Serialize the getLogConfig CALL frame (envelope + args struct)
    to the output protocol and flush the underlying transport."""
    self._oprot.writeMessageBegin('getLogConfig', TMessageType.CALL, self._seqid)
    args = getLogConfig_args()
    args.name = name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
  def recv_getLogConfig(self):
    """Read the getLogConfig reply and return its success value.

    Raises TApplicationException for transport-level EXCEPTION frames,
    or with MISSING_RESULT when the reply carries no success value.
    """
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      # Server-side framework failure: decode and propagate it verbatim.
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getLogConfig_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getLogConfig failed: unknown result")
def debug(self, name, component, enable, samplingPercentage):
"""
Enable/disable logging the tuples generated in topology via an internal EventLogger bolt. The component name is optional
and if null or empty, the debug flag will apply to the entire topology.
The 'samplingPercentage' will limit loggging to a percentage of generated tuples.
Parameters:
- name
- component
- enable
- samplingPercentage
"""
self.send_debug(name, component, enable, samplingPercentage)
self.recv_debug()
def send_debug(self, name, component, enable, samplingPercentage):
self._oprot.writeMessageBegin('debug', TMessageType.CALL, self._seqid)
args = debug_args()
args.name = name
args.component = component
args.enable = enable
args.samplingPercentage = samplingPercentage
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_debug(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = debug_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
return
def setWorkerProfiler(self, id, profileRequest):
"""
Parameters:
- id
- profileRequest
"""
self.send_setWorkerProfiler(id, profileRequest)
self.recv_setWorkerProfiler()
def send_setWorkerProfiler(self, id, profileRequest):
self._oprot.writeMessageBegin('setWorkerProfiler', TMessageType.CALL, self._seqid)
args = setWorkerProfiler_args()
args.id = id
args.profileRequest = profileRequest
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setWorkerProfiler(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = setWorkerProfiler_result()
result.read(iprot)
iprot.readMessageEnd()
return
def getComponentPendingProfileActions(self, id, component_id, action):
"""
Parameters:
- id
- component_id
- action
"""
self.send_getComponentPendingProfileActions(id, component_id, action)
return self.recv_getComponentPendingProfileActions()
def send_getComponentPendingProfileActions(self, id, component_id, action):
self._oprot.writeMessageBegin('getComponentPendingProfileActions', TMessageType.CALL, self._seqid)
args = getComponentPendingProfileActions_args()
args.id = id
args.component_id = component_id
args.action = action
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getComponentPendingProfileActions(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getComponentPendingProfileActions_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getComponentPendingProfileActions failed: unknown result")
def uploadNewCredentials(self, name, creds):
"""
Parameters:
- name
- creds
"""
self.send_uploadNewCredentials(name, creds)
self.recv_uploadNewCredentials()
def send_uploadNewCredentials(self, name, creds):
self._oprot.writeMessageBegin('uploadNewCredentials', TMessageType.CALL, self._seqid)
args = uploadNewCredentials_args()
args.name = name
args.creds = creds
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadNewCredentials(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = uploadNewCredentials_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
if result.aze is not None:
raise result.aze
return
def beginCreateBlob(self, key, meta):
"""
Parameters:
- key
- meta
"""
self.send_beginCreateBlob(key, meta)
return self.recv_beginCreateBlob()
def send_beginCreateBlob(self, key, meta):
self._oprot.writeMessageBegin('beginCreateBlob', TMessageType.CALL, self._seqid)
args = beginCreateBlob_args()
args.key = key
args.meta = meta
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginCreateBlob(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = beginCreateBlob_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.kae is not None:
raise result.kae
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginCreateBlob failed: unknown result")
def beginUpdateBlob(self, key):
"""
Parameters:
- key
"""
self.send_beginUpdateBlob(key)
return self.recv_beginUpdateBlob()
def send_beginUpdateBlob(self, key):
self._oprot.writeMessageBegin('beginUpdateBlob', TMessageType.CALL, self._seqid)
args = beginUpdateBlob_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginUpdateBlob(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = beginUpdateBlob_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginUpdateBlob failed: unknown result")
def uploadBlobChunk(self, session, chunk):
"""
Parameters:
- session
- chunk
"""
self.send_uploadBlobChunk(session, chunk)
self.recv_uploadBlobChunk()
def send_uploadBlobChunk(self, session, chunk):
self._oprot.writeMessageBegin('uploadBlobChunk', TMessageType.CALL, self._seqid)
args = uploadBlobChunk_args()
args.session = session
args.chunk = chunk
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadBlobChunk(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = uploadBlobChunk_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def finishBlobUpload(self, session):
"""
Parameters:
- session
"""
self.send_finishBlobUpload(session)
self.recv_finishBlobUpload()
def send_finishBlobUpload(self, session):
self._oprot.writeMessageBegin('finishBlobUpload', TMessageType.CALL, self._seqid)
args = finishBlobUpload_args()
args.session = session
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finishBlobUpload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = finishBlobUpload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def cancelBlobUpload(self, session):
"""
Parameters:
- session
"""
self.send_cancelBlobUpload(session)
self.recv_cancelBlobUpload()
def send_cancelBlobUpload(self, session):
self._oprot.writeMessageBegin('cancelBlobUpload', TMessageType.CALL, self._seqid)
args = cancelBlobUpload_args()
args.session = session
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_cancelBlobUpload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = cancelBlobUpload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def getBlobMeta(self, key):
"""
Parameters:
- key
"""
self.send_getBlobMeta(key)
return self.recv_getBlobMeta()
def send_getBlobMeta(self, key):
self._oprot.writeMessageBegin('getBlobMeta', TMessageType.CALL, self._seqid)
args = getBlobMeta_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getBlobMeta(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getBlobMeta_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
raise TApplicationException(TApplicationException.MISSING_RESULT, "getBlobMeta failed: unknown result")
def setBlobMeta(self, key, meta):
"""
Parameters:
- key
- meta
"""
self.send_setBlobMeta(key, meta)
self.recv_setBlobMeta()
def send_setBlobMeta(self, key, meta):
self._oprot.writeMessageBegin('setBlobMeta', TMessageType.CALL, self._seqid)
args = setBlobMeta_args()
args.key = key
args.meta = meta
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setBlobMeta(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = setBlobMeta_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
return
def beginBlobDownload(self, key):
"""
Parameters:
- key
"""
self.send_beginBlobDownload(key)
return self.recv_beginBlobDownload()
def send_beginBlobDownload(self, key):
self._oprot.writeMessageBegin('beginBlobDownload', TMessageType.CALL, self._seqid)
args = beginBlobDownload_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginBlobDownload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = beginBlobDownload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginBlobDownload failed: unknown result")
def downloadBlobChunk(self, session):
"""
Parameters:
- session
"""
self.send_downloadBlobChunk(session)
return self.recv_downloadBlobChunk()
def send_downloadBlobChunk(self, session):
self._oprot.writeMessageBegin('downloadBlobChunk', TMessageType.CALL, self._seqid)
args = downloadBlobChunk_args()
args.session = session
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_downloadBlobChunk(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = downloadBlobChunk_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "downloadBlobChunk failed: unknown result")
def deleteBlob(self, key):
"""
Parameters:
- key
"""
self.send_deleteBlob(key)
self.recv_deleteBlob()
def send_deleteBlob(self, key):
self._oprot.writeMessageBegin('deleteBlob', TMessageType.CALL, self._seqid)
args = deleteBlob_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteBlob(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deleteBlob_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
return
def listBlobs(self, session):
"""
Parameters:
- session
"""
self.send_listBlobs(session)
return self.recv_listBlobs()
def send_listBlobs(self, session):
self._oprot.writeMessageBegin('listBlobs', TMessageType.CALL, self._seqid)
args = listBlobs_args()
args.session = session
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_listBlobs(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = listBlobs_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "listBlobs failed: unknown result")
def getBlobReplication(self, key):
"""
Parameters:
- key
"""
self.send_getBlobReplication(key)
return self.recv_getBlobReplication()
def send_getBlobReplication(self, key):
self._oprot.writeMessageBegin('getBlobReplication', TMessageType.CALL, self._seqid)
args = getBlobReplication_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getBlobReplication(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getBlobReplication_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
raise TApplicationException(TApplicationException.MISSING_RESULT, "getBlobReplication failed: unknown result")
def updateBlobReplication(self, key, replication):
"""
Parameters:
- key
- replication
"""
self.send_updateBlobReplication(key, replication)
return self.recv_updateBlobReplication()
def send_updateBlobReplication(self, key, replication):
self._oprot.writeMessageBegin('updateBlobReplication', TMessageType.CALL, self._seqid)
args = updateBlobReplication_args()
args.key = key
args.replication = replication
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateBlobReplication(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = updateBlobReplication_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
if result.knf is not None:
raise result.knf
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateBlobReplication failed: unknown result")
def createStateInZookeeper(self, key):
"""
Parameters:
- key
"""
self.send_createStateInZookeeper(key)
self.recv_createStateInZookeeper()
def send_createStateInZookeeper(self, key):
self._oprot.writeMessageBegin('createStateInZookeeper', TMessageType.CALL, self._seqid)
args = createStateInZookeeper_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createStateInZookeeper(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = createStateInZookeeper_result()
result.read(iprot)
iprot.readMessageEnd()
return
def beginFileUpload(self):
self.send_beginFileUpload()
return self.recv_beginFileUpload()
def send_beginFileUpload(self):
self._oprot.writeMessageBegin('beginFileUpload', TMessageType.CALL, self._seqid)
args = beginFileUpload_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileUpload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = beginFileUpload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileUpload failed: unknown result")
def uploadChunk(self, location, chunk):
"""
Parameters:
- location
- chunk
"""
self.send_uploadChunk(location, chunk)
self.recv_uploadChunk()
def send_uploadChunk(self, location, chunk):
self._oprot.writeMessageBegin('uploadChunk', TMessageType.CALL, self._seqid)
args = uploadChunk_args()
args.location = location
args.chunk = chunk
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadChunk(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = uploadChunk_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def finishFileUpload(self, location):
"""
Parameters:
- location
"""
self.send_finishFileUpload(location)
self.recv_finishFileUpload()
def send_finishFileUpload(self, location):
self._oprot.writeMessageBegin('finishFileUpload', TMessageType.CALL, self._seqid)
args = finishFileUpload_args()
args.location = location
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finishFileUpload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = finishFileUpload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def beginFileDownload(self, file):
"""
Parameters:
- file
"""
self.send_beginFileDownload(file)
return self.recv_beginFileDownload()
def send_beginFileDownload(self, file):
self._oprot.writeMessageBegin('beginFileDownload', TMessageType.CALL, self._seqid)
args = beginFileDownload_args()
args.file = file
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileDownload(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = beginFileDownload_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileDownload failed: unknown result")
def downloadChunk(self, id):
"""
Parameters:
- id
"""
self.send_downloadChunk(id)
return self.recv_downloadChunk()
def send_downloadChunk(self, id):
self._oprot.writeMessageBegin('downloadChunk', TMessageType.CALL, self._seqid)
args = downloadChunk_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_downloadChunk(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = downloadChunk_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "downloadChunk failed: unknown result")
def getNimbusConf(self):
self.send_getNimbusConf()
return self.recv_getNimbusConf()
def send_getNimbusConf(self):
self._oprot.writeMessageBegin('getNimbusConf', TMessageType.CALL, self._seqid)
args = getNimbusConf_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getNimbusConf(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getNimbusConf_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "getNimbusConf failed: unknown result")
def getClusterInfo(self):
self.send_getClusterInfo()
return self.recv_getClusterInfo()
def send_getClusterInfo(self):
self._oprot.writeMessageBegin('getClusterInfo', TMessageType.CALL, self._seqid)
args = getClusterInfo_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getClusterInfo(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getClusterInfo_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "getClusterInfo failed: unknown result")
  def getLeader(self):
    """Blocking RPC: return the current leader Nimbus."""
    self.send_getLeader()
    return self.recv_getLeader()

  def send_getLeader(self):
    # Serialize the CALL frame (no arguments) and flush it.
    self._oprot.writeMessageBegin('getLeader', TMessageType.CALL, self._seqid)
    args = getLeader_args()
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getLeader(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getLeader_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exception carried in the result struct.
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getLeader failed: unknown result")
  def isTopologyNameAllowed(self, name):
    """Blocking RPC: check whether *name* may be used for a new topology.

    Parameters:
     - name: candidate topology name
    """
    self.send_isTopologyNameAllowed(name)
    return self.recv_isTopologyNameAllowed()

  def send_isTopologyNameAllowed(self, name):
    # Serialize the CALL frame with the name argument and flush it.
    self._oprot.writeMessageBegin('isTopologyNameAllowed', TMessageType.CALL, self._seqid)
    args = isTopologyNameAllowed_args()
    args.name = name
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_isTopologyNameAllowed(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = isTopologyNameAllowed_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exception carried in the result struct.
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isTopologyNameAllowed failed: unknown result")
  def getTopologyInfo(self, id):
    """Blocking RPC: return runtime info for the topology with storm id *id*.

    Parameters:
     - id: topology (storm) id
    """
    self.send_getTopologyInfo(id)
    return self.recv_getTopologyInfo()

  def send_getTopologyInfo(self, id):
    # Serialize the CALL frame with the id argument and flush it.
    self._oprot.writeMessageBegin('getTopologyInfo', TMessageType.CALL, self._seqid)
    args = getTopologyInfo_args()
    args.id = id
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopologyInfo(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopologyInfo_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyInfo failed: unknown result")
  def getTopologyInfoWithOpts(self, id, options):
    """Blocking RPC: like getTopologyInfo, with extra request options.

    Parameters:
     - id: topology (storm) id
     - options: request options struct
    """
    self.send_getTopologyInfoWithOpts(id, options)
    return self.recv_getTopologyInfoWithOpts()

  def send_getTopologyInfoWithOpts(self, id, options):
    # Serialize the CALL frame with both arguments and flush it.
    self._oprot.writeMessageBegin('getTopologyInfoWithOpts', TMessageType.CALL, self._seqid)
    args = getTopologyInfoWithOpts_args()
    args.id = id
    args.options = options
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopologyInfoWithOpts(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopologyInfoWithOpts_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyInfoWithOpts failed: unknown result")
  def getTopologyPageInfo(self, id, window, is_include_sys):
    """Blocking RPC: return UI page info for one topology.

    Parameters:
     - id: topology (storm) id
     - window: stats time window
     - is_include_sys: whether to include system streams/components
    """
    self.send_getTopologyPageInfo(id, window, is_include_sys)
    return self.recv_getTopologyPageInfo()

  def send_getTopologyPageInfo(self, id, window, is_include_sys):
    # Serialize the CALL frame with all arguments and flush it.
    self._oprot.writeMessageBegin('getTopologyPageInfo', TMessageType.CALL, self._seqid)
    args = getTopologyPageInfo_args()
    args.id = id
    args.window = window
    args.is_include_sys = is_include_sys
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopologyPageInfo(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopologyPageInfo_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyPageInfo failed: unknown result")
  def getSupervisorPageInfo(self, id, host, is_include_sys):
    """Blocking RPC: return UI page info for one supervisor.

    Parameters:
     - id: supervisor id
     - host: supervisor host
     - is_include_sys: whether to include system streams/components
    """
    self.send_getSupervisorPageInfo(id, host, is_include_sys)
    return self.recv_getSupervisorPageInfo()

  def send_getSupervisorPageInfo(self, id, host, is_include_sys):
    # Serialize the CALL frame with all arguments and flush it.
    self._oprot.writeMessageBegin('getSupervisorPageInfo', TMessageType.CALL, self._seqid)
    args = getSupervisorPageInfo_args()
    args.id = id
    args.host = host
    args.is_include_sys = is_include_sys
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getSupervisorPageInfo(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getSupervisorPageInfo_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getSupervisorPageInfo failed: unknown result")
  def getComponentPageInfo(self, topology_id, component_id, window, is_include_sys):
    """Blocking RPC: return UI page info for one component of a topology.

    Parameters:
     - topology_id: topology (storm) id
     - component_id: component id within the topology
     - window: stats time window
     - is_include_sys: whether to include system streams/components
    """
    self.send_getComponentPageInfo(topology_id, component_id, window, is_include_sys)
    return self.recv_getComponentPageInfo()

  def send_getComponentPageInfo(self, topology_id, component_id, window, is_include_sys):
    # Serialize the CALL frame with all arguments and flush it.
    self._oprot.writeMessageBegin('getComponentPageInfo', TMessageType.CALL, self._seqid)
    args = getComponentPageInfo_args()
    args.topology_id = topology_id
    args.component_id = component_id
    args.window = window
    args.is_include_sys = is_include_sys
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getComponentPageInfo(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getComponentPageInfo_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getComponentPageInfo failed: unknown result")
  def getTopologyConf(self, id):
    """Blocking RPC: return the configuration of topology *id*.

    Parameters:
     - id: topology (storm) id
    """
    self.send_getTopologyConf(id)
    return self.recv_getTopologyConf()

  def send_getTopologyConf(self, id):
    # Serialize the CALL frame with the id argument and flush it.
    self._oprot.writeMessageBegin('getTopologyConf', TMessageType.CALL, self._seqid)
    args = getTopologyConf_args()
    args.id = id
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopologyConf(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopologyConf_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyConf failed: unknown result")
  def getTopology(self, id):
    """Blocking RPC: return the compiled topology, which contains ackers and
    metrics consumers. Compare {@link #getUserTopology(String id)}.

    Parameters:
     - id: topology (storm) id
    """
    self.send_getTopology(id)
    return self.recv_getTopology()

  def send_getTopology(self, id):
    # Serialize the CALL frame with the id argument and flush it.
    self._oprot.writeMessageBegin('getTopology', TMessageType.CALL, self._seqid)
    args = getTopology_args()
    args.id = id
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopology(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopology_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopology failed: unknown result")
  def getUserTopology(self, id):
    """Blocking RPC: return the user-specified topology as submitted
    originally. Compare {@link #getTopology(String id)}.

    Parameters:
     - id: topology (storm) id
    """
    self.send_getUserTopology(id)
    return self.recv_getUserTopology()

  def send_getUserTopology(self, id):
    # Serialize the CALL frame with the id argument and flush it.
    self._oprot.writeMessageBegin('getUserTopology', TMessageType.CALL, self._seqid)
    args = getUserTopology_args()
    args.id = id
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getUserTopology(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getUserTopology_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exceptions carried in the result struct, in IDL order.
    if result.e is not None:
      raise result.e
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getUserTopology failed: unknown result")
  def getTopologyHistory(self, user):
    """Blocking RPC: return the topology history visible to *user*.

    Parameters:
     - user: user name to look up history for
    """
    self.send_getTopologyHistory(user)
    return self.recv_getTopologyHistory()

  def send_getTopologyHistory(self, user):
    # Serialize the CALL frame with the user argument and flush it.
    self._oprot.writeMessageBegin('getTopologyHistory', TMessageType.CALL, self._seqid)
    args = getTopologyHistory_args()
    args.user = user
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getTopologyHistory(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getTopologyHistory_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exception carried in the result struct.
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyHistory failed: unknown result")
  def getOwnerResourceSummaries(self, owner):
    """Blocking RPC: return resource summaries for *owner*.

    Parameters:
     - owner: owner name to summarize resources for
    """
    self.send_getOwnerResourceSummaries(owner)
    return self.recv_getOwnerResourceSummaries()

  def send_getOwnerResourceSummaries(self, owner):
    # Serialize the CALL frame with the owner argument and flush it.
    self._oprot.writeMessageBegin('getOwnerResourceSummaries', TMessageType.CALL, self._seqid)
    args = getOwnerResourceSummaries_args()
    args.owner = owner
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getOwnerResourceSummaries(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getOwnerResourceSummaries_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Service-declared exception carried in the result struct.
    if result.aze is not None:
      raise result.aze
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getOwnerResourceSummaries failed: unknown result")
  def processWorkerMetrics(self, metrics):
    """Blocking void RPC: submit worker metrics for processing.

    Parameters:
     - metrics: worker metrics payload
    """
    self.send_processWorkerMetrics(metrics)
    # Void RPC: recv only waits for the reply frame, there is no return value.
    self.recv_processWorkerMetrics()

  def send_processWorkerMetrics(self, metrics):
    # Serialize the CALL frame with the metrics argument and flush it.
    self._oprot.writeMessageBegin('processWorkerMetrics', TMessageType.CALL, self._seqid)
    args = processWorkerMetrics_args()
    args.metrics = metrics
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_processWorkerMetrics(self):
    # Read the reply frame; a server-side failure arrives as a TApplicationException.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = processWorkerMetrics_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Void result: nothing to return once the frame is consumed.
    return
class Processor(Iface, TProcessor):
  def __init__(self, handler):
    """Build the RPC dispatch table mapping wire method names to the
    matching process_* methods, delegating actual work to *handler*
    (an Iface implementation)."""
    self._handler = handler
    # Keys are the method names as they appear on the wire; values are
    # unbound Processor methods invoked as f(self, seqid, iprot, oprot).
    self._processMap = {}
    self._processMap["submitTopology"] = Processor.process_submitTopology
    self._processMap["submitTopologyWithOpts"] = Processor.process_submitTopologyWithOpts
    self._processMap["killTopology"] = Processor.process_killTopology
    self._processMap["killTopologyWithOpts"] = Processor.process_killTopologyWithOpts
    self._processMap["activate"] = Processor.process_activate
    self._processMap["deactivate"] = Processor.process_deactivate
    self._processMap["rebalance"] = Processor.process_rebalance
    self._processMap["setLogConfig"] = Processor.process_setLogConfig
    self._processMap["getLogConfig"] = Processor.process_getLogConfig
    self._processMap["debug"] = Processor.process_debug
    self._processMap["setWorkerProfiler"] = Processor.process_setWorkerProfiler
    self._processMap["getComponentPendingProfileActions"] = Processor.process_getComponentPendingProfileActions
    self._processMap["uploadNewCredentials"] = Processor.process_uploadNewCredentials
    self._processMap["beginCreateBlob"] = Processor.process_beginCreateBlob
    self._processMap["beginUpdateBlob"] = Processor.process_beginUpdateBlob
    self._processMap["uploadBlobChunk"] = Processor.process_uploadBlobChunk
    self._processMap["finishBlobUpload"] = Processor.process_finishBlobUpload
    self._processMap["cancelBlobUpload"] = Processor.process_cancelBlobUpload
    self._processMap["getBlobMeta"] = Processor.process_getBlobMeta
    self._processMap["setBlobMeta"] = Processor.process_setBlobMeta
    self._processMap["beginBlobDownload"] = Processor.process_beginBlobDownload
    self._processMap["downloadBlobChunk"] = Processor.process_downloadBlobChunk
    self._processMap["deleteBlob"] = Processor.process_deleteBlob
    self._processMap["listBlobs"] = Processor.process_listBlobs
    self._processMap["getBlobReplication"] = Processor.process_getBlobReplication
    self._processMap["updateBlobReplication"] = Processor.process_updateBlobReplication
    self._processMap["createStateInZookeeper"] = Processor.process_createStateInZookeeper
    self._processMap["beginFileUpload"] = Processor.process_beginFileUpload
    self._processMap["uploadChunk"] = Processor.process_uploadChunk
    self._processMap["finishFileUpload"] = Processor.process_finishFileUpload
    self._processMap["beginFileDownload"] = Processor.process_beginFileDownload
    self._processMap["downloadChunk"] = Processor.process_downloadChunk
    self._processMap["getNimbusConf"] = Processor.process_getNimbusConf
    self._processMap["getClusterInfo"] = Processor.process_getClusterInfo
    self._processMap["getLeader"] = Processor.process_getLeader
    self._processMap["isTopologyNameAllowed"] = Processor.process_isTopologyNameAllowed
    self._processMap["getTopologyInfo"] = Processor.process_getTopologyInfo
    self._processMap["getTopologyInfoWithOpts"] = Processor.process_getTopologyInfoWithOpts
    self._processMap["getTopologyPageInfo"] = Processor.process_getTopologyPageInfo
    self._processMap["getSupervisorPageInfo"] = Processor.process_getSupervisorPageInfo
    self._processMap["getComponentPageInfo"] = Processor.process_getComponentPageInfo
    self._processMap["getTopologyConf"] = Processor.process_getTopologyConf
    self._processMap["getTopology"] = Processor.process_getTopology
    self._processMap["getUserTopology"] = Processor.process_getUserTopology
    self._processMap["getTopologyHistory"] = Processor.process_getTopologyHistory
    self._processMap["getOwnerResourceSummaries"] = Processor.process_getOwnerResourceSummaries
    self._processMap["processWorkerMetrics"] = Processor.process_processWorkerMetrics
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
  def process_submitTopology(self, seqid, iprot, oprot):
    """Server side of submitTopology: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = submitTopology_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = submitTopology_result()
    try:
      self._handler.submitTopology(args.name, args.uploadedJarLocation, args.jsonConf, args.topology)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AlreadyAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except InvalidTopologyException as ite:
      msg_type = TMessageType.REPLY
      result.ite = ite
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("submitTopology", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_submitTopologyWithOpts(self, seqid, iprot, oprot):
    """Server side of submitTopologyWithOpts: deserialize args, invoke the
    handler, and write a REPLY (or EXCEPTION) frame back to the client."""
    args = submitTopologyWithOpts_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = submitTopologyWithOpts_result()
    try:
      self._handler.submitTopologyWithOpts(args.name, args.uploadedJarLocation, args.jsonConf, args.topology, args.options)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AlreadyAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except InvalidTopologyException as ite:
      msg_type = TMessageType.REPLY
      result.ite = ite
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("submitTopologyWithOpts", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_killTopology(self, seqid, iprot, oprot):
    """Server side of killTopology: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = killTopology_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = killTopology_result()
    try:
      self._handler.killTopology(args.name)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("killTopology", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_killTopologyWithOpts(self, seqid, iprot, oprot):
    """Server side of killTopologyWithOpts: deserialize args, invoke the
    handler, and write a REPLY (or EXCEPTION) frame back to the client."""
    args = killTopologyWithOpts_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = killTopologyWithOpts_result()
    try:
      self._handler.killTopologyWithOpts(args.name, args.options)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("killTopologyWithOpts", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_activate(self, seqid, iprot, oprot):
    """Server side of activate: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = activate_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = activate_result()
    try:
      self._handler.activate(args.name)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("activate", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_deactivate(self, seqid, iprot, oprot):
    """Server side of deactivate: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = deactivate_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = deactivate_result()
    try:
      self._handler.deactivate(args.name)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("deactivate", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_rebalance(self, seqid, iprot, oprot):
    """Server side of rebalance: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = rebalance_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = rebalance_result()
    try:
      self._handler.rebalance(args.name, args.options)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except InvalidTopologyException as ite:
      msg_type = TMessageType.REPLY
      result.ite = ite
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("rebalance", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_setLogConfig(self, seqid, iprot, oprot):
    """Server side of setLogConfig: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = setLogConfig_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = setLogConfig_result()
    try:
      self._handler.setLogConfig(args.name, args.config)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("setLogConfig", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getLogConfig(self, seqid, iprot, oprot):
    """Server side of getLogConfig: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = getLogConfig_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getLogConfig_result()
    try:
      result.success = self._handler.getLogConfig(args.name)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getLogConfig", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_debug(self, seqid, iprot, oprot):
    """Server side of debug: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = debug_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = debug_result()
    try:
      self._handler.debug(args.name, args.component, args.enable, args.samplingPercentage)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("debug", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_setWorkerProfiler(self, seqid, iprot, oprot):
    """Server side of setWorkerProfiler: deserialize args, invoke the
    handler, and write a REPLY (or EXCEPTION) frame back to the client."""
    args = setWorkerProfiler_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = setWorkerProfiler_result()
    try:
      self._handler.setWorkerProfiler(args.id, args.profileRequest)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("setWorkerProfiler", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getComponentPendingProfileActions(self, seqid, iprot, oprot):
    """Server side of getComponentPendingProfileActions: deserialize args,
    invoke the handler, and write a REPLY (or EXCEPTION) frame back."""
    args = getComponentPendingProfileActions_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getComponentPendingProfileActions_result()
    try:
      result.success = self._handler.getComponentPendingProfileActions(args.id, args.component_id, args.action)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getComponentPendingProfileActions", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_uploadNewCredentials(self, seqid, iprot, oprot):
    """Server side of uploadNewCredentials: deserialize args, invoke the
    handler, and write a REPLY (or EXCEPTION) frame back to the client."""
    args = uploadNewCredentials_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = uploadNewCredentials_result()
    try:
      self._handler.uploadNewCredentials(args.name, args.creds)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except NotAliveException as e:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.e = e
    except InvalidTopologyException as ite:
      msg_type = TMessageType.REPLY
      result.ite = ite
    except AuthorizationException as aze:
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("uploadNewCredentials", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_beginCreateBlob(self, seqid, iprot, oprot):
    """Server side of beginCreateBlob: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = beginCreateBlob_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = beginCreateBlob_result()
    try:
      result.success = self._handler.beginCreateBlob(args.key, args.meta)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except KeyAlreadyExistsException as kae:
      msg_type = TMessageType.REPLY
      result.kae = kae
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("beginCreateBlob", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_beginUpdateBlob(self, seqid, iprot, oprot):
    """Server side of beginUpdateBlob: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = beginUpdateBlob_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = beginUpdateBlob_result()
    try:
      result.success = self._handler.beginUpdateBlob(args.key)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except KeyNotFoundException as knf:
      msg_type = TMessageType.REPLY
      result.knf = knf
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("beginUpdateBlob", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_uploadBlobChunk(self, seqid, iprot, oprot):
    """Server side of uploadBlobChunk: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = uploadBlobChunk_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = uploadBlobChunk_result()
    try:
      self._handler.uploadBlobChunk(args.session, args.chunk)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exception travels inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("uploadBlobChunk", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_finishBlobUpload(self, seqid, iprot, oprot):
    """Server side of finishBlobUpload: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = finishBlobUpload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = finishBlobUpload_result()
    try:
      self._handler.finishBlobUpload(args.session)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exception travels inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("finishBlobUpload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_cancelBlobUpload(self, seqid, iprot, oprot):
    """Server side of cancelBlobUpload: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = cancelBlobUpload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = cancelBlobUpload_result()
    try:
      self._handler.cancelBlobUpload(args.session)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exception travels inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("cancelBlobUpload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getBlobMeta(self, seqid, iprot, oprot):
    """Server side of getBlobMeta: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = getBlobMeta_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getBlobMeta_result()
    try:
      result.success = self._handler.getBlobMeta(args.key)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except KeyNotFoundException as knf:
      msg_type = TMessageType.REPLY
      result.knf = knf
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getBlobMeta", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_setBlobMeta(self, seqid, iprot, oprot):
    """Server side of setBlobMeta: deserialize args, invoke the handler,
    and write a REPLY (or EXCEPTION) frame back to the client."""
    args = setBlobMeta_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = setBlobMeta_result()
    try:
      self._handler.setBlobMeta(args.key, args.meta)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals must propagate to the server loop.
      raise
    except AuthorizationException as aze:
      # Declared thrift exceptions travel inside the result struct as a REPLY.
      msg_type = TMessageType.REPLY
      result.aze = aze
    except KeyNotFoundException as knf:
      msg_type = TMessageType.REPLY
      result.knf = knf
    except Exception as ex:
      # Undeclared failures become an opaque INTERNAL_ERROR exception frame.
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("setBlobMeta", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_beginBlobDownload(self, seqid, iprot, oprot):
    """Service one beginBlobDownload RPC: decode args, call the handler, write the reply."""
    args = beginBlobDownload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = beginBlobDownload_result()
    try:
        result.success = self._handler.beginBlobDownload(args.key)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except KeyNotFoundException as knf:
        msg_type = TMessageType.REPLY
        result.knf = knf
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("beginBlobDownload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_downloadBlobChunk(self, seqid, iprot, oprot):
    """Service one downloadBlobChunk RPC: decode args, call the handler, write the reply."""
    args = downloadBlobChunk_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = downloadBlobChunk_result()
    try:
        result.success = self._handler.downloadBlobChunk(args.session)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("downloadBlobChunk", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_deleteBlob(self, seqid, iprot, oprot):
    """Service one deleteBlob RPC (void: only exceptions travel back in the result)."""
    args = deleteBlob_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = deleteBlob_result()
    try:
        self._handler.deleteBlob(args.key)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except KeyNotFoundException as knf:
        msg_type = TMessageType.REPLY
        result.knf = knf
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("deleteBlob", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_listBlobs(self, seqid, iprot, oprot):
    """Service one listBlobs RPC: decode args, call the handler, write the reply."""
    args = listBlobs_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = listBlobs_result()
    try:
        result.success = self._handler.listBlobs(args.session)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except Exception as ex:
        # No declared service exceptions for this method; anything else is opaque.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("listBlobs", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getBlobReplication(self, seqid, iprot, oprot):
    """Service one getBlobReplication RPC: decode args, call the handler, write the reply."""
    args = getBlobReplication_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getBlobReplication_result()
    try:
        result.success = self._handler.getBlobReplication(args.key)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except KeyNotFoundException as knf:
        msg_type = TMessageType.REPLY
        result.knf = knf
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getBlobReplication", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_updateBlobReplication(self, seqid, iprot, oprot):
    """Service one updateBlobReplication RPC: decode args, call the handler, write the reply."""
    args = updateBlobReplication_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = updateBlobReplication_result()
    try:
        result.success = self._handler.updateBlobReplication(args.key, args.replication)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except KeyNotFoundException as knf:
        msg_type = TMessageType.REPLY
        result.knf = knf
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("updateBlobReplication", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_createStateInZookeeper(self, seqid, iprot, oprot):
    """Service one createStateInZookeeper RPC (void; no declared service exceptions)."""
    args = createStateInZookeeper_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = createStateInZookeeper_result()
    try:
        self._handler.createStateInZookeeper(args.key)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createStateInZookeeper", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_beginFileUpload(self, seqid, iprot, oprot):
    """Service one beginFileUpload RPC: call the handler (no args) and write the reply."""
    args = beginFileUpload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = beginFileUpload_result()
    try:
        result.success = self._handler.beginFileUpload()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("beginFileUpload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_uploadChunk(self, seqid, iprot, oprot):
    """Service one uploadChunk RPC (void: only exceptions travel back in the result)."""
    args = uploadChunk_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = uploadChunk_result()
    try:
        self._handler.uploadChunk(args.location, args.chunk)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("uploadChunk", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_finishFileUpload(self, seqid, iprot, oprot):
    """Service one finishFileUpload RPC (void: only exceptions travel back in the result)."""
    args = finishFileUpload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = finishFileUpload_result()
    try:
        self._handler.finishFileUpload(args.location)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("finishFileUpload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_beginFileDownload(self, seqid, iprot, oprot):
    """Service one beginFileDownload RPC: decode args, call the handler, write the reply."""
    args = beginFileDownload_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = beginFileDownload_result()
    try:
        result.success = self._handler.beginFileDownload(args.file)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("beginFileDownload", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_downloadChunk(self, seqid, iprot, oprot):
    """Service one downloadChunk RPC: decode args, call the handler, write the reply."""
    args = downloadChunk_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = downloadChunk_result()
    try:
        result.success = self._handler.downloadChunk(args.id)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("downloadChunk", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getNimbusConf(self, seqid, iprot, oprot):
    """Service one getNimbusConf RPC: call the handler (no args) and write the reply."""
    args = getNimbusConf_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getNimbusConf_result()
    try:
        result.success = self._handler.getNimbusConf()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getNimbusConf", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getClusterInfo(self, seqid, iprot, oprot):
    """Service one getClusterInfo RPC: call the handler (no args) and write the reply."""
    args = getClusterInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getClusterInfo_result()
    try:
        result.success = self._handler.getClusterInfo()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getClusterInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getLeader(self, seqid, iprot, oprot):
    """Service one getLeader RPC: call the handler (no args) and write the reply."""
    args = getLeader_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getLeader_result()
    try:
        result.success = self._handler.getLeader()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getLeader", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_isTopologyNameAllowed(self, seqid, iprot, oprot):
    """Service one isTopologyNameAllowed RPC: decode args, call the handler, write the reply."""
    args = isTopologyNameAllowed_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = isTopologyNameAllowed_result()
    try:
        result.success = self._handler.isTopologyNameAllowed(args.name)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("isTopologyNameAllowed", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopologyInfo(self, seqid, iprot, oprot):
    """Service one getTopologyInfo RPC: decode args, call the handler, write the reply."""
    args = getTopologyInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopologyInfo_result()
    try:
        result.success = self._handler.getTopologyInfo(args.id)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopologyInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopologyInfoWithOpts(self, seqid, iprot, oprot):
    """Service one getTopologyInfoWithOpts RPC: decode args, call the handler, write the reply."""
    args = getTopologyInfoWithOpts_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopologyInfoWithOpts_result()
    try:
        result.success = self._handler.getTopologyInfoWithOpts(args.id, args.options)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopologyInfoWithOpts", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopologyPageInfo(self, seqid, iprot, oprot):
    """Service one getTopologyPageInfo RPC: decode args, call the handler, write the reply."""
    args = getTopologyPageInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopologyPageInfo_result()
    try:
        result.success = self._handler.getTopologyPageInfo(args.id, args.window, args.is_include_sys)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopologyPageInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getSupervisorPageInfo(self, seqid, iprot, oprot):
    """Service one getSupervisorPageInfo RPC: decode args, call the handler, write the reply."""
    args = getSupervisorPageInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getSupervisorPageInfo_result()
    try:
        result.success = self._handler.getSupervisorPageInfo(args.id, args.host, args.is_include_sys)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getSupervisorPageInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getComponentPageInfo(self, seqid, iprot, oprot):
    """Service one getComponentPageInfo RPC: decode args, call the handler, write the reply."""
    args = getComponentPageInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getComponentPageInfo_result()
    try:
        result.success = self._handler.getComponentPageInfo(args.topology_id, args.component_id, args.window, args.is_include_sys)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getComponentPageInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopologyConf(self, seqid, iprot, oprot):
    """Service one getTopologyConf RPC: decode args, call the handler, write the reply."""
    args = getTopologyConf_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopologyConf_result()
    try:
        result.success = self._handler.getTopologyConf(args.id)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopologyConf", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopology(self, seqid, iprot, oprot):
    """Service one getTopology RPC: decode args, call the handler, write the reply."""
    args = getTopology_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopology_result()
    try:
        result.success = self._handler.getTopology(args.id)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopology", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getUserTopology(self, seqid, iprot, oprot):
    """Service one getUserTopology RPC: decode args, call the handler, write the reply."""
    args = getUserTopology_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getUserTopology_result()
    try:
        result.success = self._handler.getUserTopology(args.id)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except NotAliveException as e:
        # Declared service exceptions are serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.e = e
    except AuthorizationException as aze:
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getUserTopology", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getTopologyHistory(self, seqid, iprot, oprot):
    """Service one getTopologyHistory RPC: decode args, call the handler, write the reply."""
    args = getTopologyHistory_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTopologyHistory_result()
    try:
        result.success = self._handler.getTopologyHistory(args.user)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTopologyHistory", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getOwnerResourceSummaries(self, seqid, iprot, oprot):
    """Service one getOwnerResourceSummaries RPC: decode args, call the handler, write the reply."""
    args = getOwnerResourceSummaries_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getOwnerResourceSummaries_result()
    try:
        result.success = self._handler.getOwnerResourceSummaries(args.owner)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except AuthorizationException as aze:
        # Declared service exception: serialized inside a normal REPLY.
        msg_type = TMessageType.REPLY
        result.aze = aze
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getOwnerResourceSummaries", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_processWorkerMetrics(self, seqid, iprot, oprot):
    """Service one processWorkerMetrics RPC (void; no declared service exceptions)."""
    args = processWorkerMetrics_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = processWorkerMetrics_result()
    try:
        self._handler.processWorkerMetrics(args.metrics)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Transport failures and shutdown signals must propagate.
        raise
    except Exception as ex:
        # Undeclared failure: log and return a generic application exception.
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("processWorkerMetrics", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class submitTopology_args:
    """
    Thrift argument struct for Nimbus.submitTopology.

    Attributes:
     - name
     - uploadedJarLocation
     - jsonConf
     - topology
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'name', None, None, ),  # 1
        (2, TType.STRING, 'uploadedJarLocation', None, None, ),  # 2
        (3, TType.STRING, 'jsonConf', None, None, ),  # 3
        (4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ),  # 4
    )

    def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None,):
        self.name = name
        self.uploadedJarLocation = uploadedJarLocation
        self.jsonConf = jsonConf
        self.topology = topology

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.uploadedJarLocation = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.jsonConf = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.topology = StormTopology()
                    self.topology.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted from the wire."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('submitTopology_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.uploadedJarLocation is not None:
            oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
            oprot.writeString(self.uploadedJarLocation.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.jsonConf is not None:
            oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
            oprot.writeString(self.jsonConf.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.topology is not None:
            oprot.writeFieldBegin('topology', TType.STRUCT, 4)
            self.topology.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.name)
        value = (value * 31) ^ hash(self.uploadedJarLocation)
        value = (value * 31) ^ hash(self.jsonConf)
        value = (value * 31) ^ hash(self.topology)
        return value

    def __repr__(self):
        # dict.items() instead of the Python-2-only iteritems(): works on both 2 and 3.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class submitTopology_result:
    """
    Thrift result struct for Nimbus.submitTopology (void method: exception fields only).

    Attributes:
     - e
     - ite
     - aze
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 3
    )

    def __init__(self, e=None, ite=None, aze=None,):
        self.e = e
        self.ite = ite
        self.aze = aze

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = AlreadyAliveException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ite = InvalidTopologyException()
                    self.ite.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted from the wire."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('submitTopology_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        if self.ite is not None:
            oprot.writeFieldBegin('ite', TType.STRUCT, 2)
            self.ite.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 3)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.e)
        value = (value * 31) ^ hash(self.ite)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # dict.items() instead of the Python-2-only iteritems(): works on both 2 and 3.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class submitTopologyWithOpts_args:
    """
    Thrift argument struct for Nimbus.submitTopologyWithOpts.

    Attributes:
     - name
     - uploadedJarLocation
     - jsonConf
     - topology
     - options
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'name', None, None, ),  # 1
        (2, TType.STRING, 'uploadedJarLocation', None, None, ),  # 2
        (3, TType.STRING, 'jsonConf', None, None, ),  # 3
        (4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ),  # 4
        (5, TType.STRUCT, 'options', (SubmitOptions, SubmitOptions.thrift_spec), None, ),  # 5
    )

    def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None, options=None,):
        self.name = name
        self.uploadedJarLocation = uploadedJarLocation
        self.jsonConf = jsonConf
        self.topology = topology
        self.options = options

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.uploadedJarLocation = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.jsonConf = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.topology = StormTopology()
                    self.topology.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRUCT:
                    self.options = SubmitOptions()
                    self.options.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted from the wire."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('submitTopologyWithOpts_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.uploadedJarLocation is not None:
            oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
            oprot.writeString(self.uploadedJarLocation.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.jsonConf is not None:
            oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
            oprot.writeString(self.jsonConf.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.topology is not None:
            oprot.writeFieldBegin('topology', TType.STRUCT, 4)
            self.topology.write(oprot)
            oprot.writeFieldEnd()
        if self.options is not None:
            oprot.writeFieldBegin('options', TType.STRUCT, 5)
            self.options.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.name)
        value = (value * 31) ^ hash(self.uploadedJarLocation)
        value = (value * 31) ^ hash(self.jsonConf)
        value = (value * 31) ^ hash(self.topology)
        value = (value * 31) ^ hash(self.options)
        return value

    def __repr__(self):
        # dict.items() instead of the Python-2-only iteritems(): works on both 2 and 3.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class submitTopologyWithOpts_result:
    """
    Thrift result struct for Nimbus.submitTopologyWithOpts (void method: exception fields only).

    Attributes:
     - e
     - ite
     - aze
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 3
    )

    def __init__(self, e=None, ite=None, aze=None,):
        self.e = e
        self.ite = ite
        self.aze = aze

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = AlreadyAliveException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ite = InvalidTopologyException()
                    self.ite.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted from the wire."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('submitTopologyWithOpts_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        if self.ite is not None:
            oprot.writeFieldBegin('ite', TType.STRUCT, 2)
            self.ite.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 3)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.e)
        value = (value * 31) ^ hash(self.ite)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # dict.items() instead of the Python-2-only iteritems(): works on both 2 and 3.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class killTopology_args:
    """
    Thrift argument struct for Nimbus.killTopology.

    Attributes:
     - name
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'name', None, None, ),  # 1
    )

    def __init__(self, name=None,):
        self.name = name

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted from the wire."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('killTopology_args')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.name)
        return value

    def __repr__(self):
        # dict.items() instead of the Python-2-only iteritems(): works on both 2 and 3.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class killTopology_result:
  """Thrift result struct for the killTopology RPC (exception carriers only).

  Attributes:
   - e
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
  )

  def __init__(self, e=None, aze=None,):
    self.e = e
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('killTopology_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class killTopologyWithOpts_args:
  """Thrift argument struct for the killTopologyWithOpts RPC.

  Attributes:
   - name
   - options
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRUCT, 'options', (KillOptions, KillOptions.thrift_spec), None, ), # 2
  )

  def __init__(self, name=None, options=None,):
    self.name = name
    self.options = options

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.options = KillOptions()
          self.options.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('killTopologyWithOpts_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.options is not None:
      oprot.writeFieldBegin('options', TType.STRUCT, 2)
      self.options.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.options)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class killTopologyWithOpts_result:
  """Thrift result struct for the killTopologyWithOpts RPC (exception carriers only).

  Attributes:
   - e
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
  )

  def __init__(self, e=None, aze=None,):
    self.e = e
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('killTopologyWithOpts_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class activate_args:
  """Thrift argument struct for the activate RPC.

  Attributes:
   - name
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
  )

  def __init__(self, name=None,):
    self.name = name

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('activate_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class activate_result:
  """Thrift result struct for the activate RPC (exception carriers only).

  Attributes:
   - e
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
  )

  def __init__(self, e=None, aze=None,):
    self.e = e
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('activate_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class deactivate_args:
  """Thrift argument struct for the deactivate RPC.

  Attributes:
   - name
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
  )

  def __init__(self, name=None,):
    self.name = name

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('deactivate_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class deactivate_result:
  """Thrift result struct for the deactivate RPC (exception carriers only).

  Attributes:
   - e
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
  )

  def __init__(self, e=None, aze=None,):
    self.e = e
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('deactivate_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class rebalance_args:
  """Thrift argument struct for the rebalance RPC.

  Attributes:
   - name
   - options
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRUCT, 'options', (RebalanceOptions, RebalanceOptions.thrift_spec), None, ), # 2
  )

  def __init__(self, name=None, options=None,):
    self.name = name
    self.options = options

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.options = RebalanceOptions()
          self.options.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('rebalance_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.options is not None:
      oprot.writeFieldBegin('options', TType.STRUCT, 2)
      self.options.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.options)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class rebalance_result:
  """Thrift result struct for the rebalance RPC (exception carriers only).

  Attributes:
   - e
   - ite
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 3
  )

  def __init__(self, e=None, ite=None, aze=None,):
    self.e = e
    self.ite = ite
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.ite = InvalidTopologyException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('rebalance_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.ite is not None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 2)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 3)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.ite)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setLogConfig_args:
  """Thrift argument struct for the setLogConfig RPC.

  Attributes:
   - name
   - config
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRUCT, 'config', (LogConfig, LogConfig.thrift_spec), None, ), # 2
  )

  def __init__(self, name=None, config=None,):
    self.name = name
    self.config = config

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.config = LogConfig()
          self.config.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setLogConfig_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.config is not None:
      oprot.writeFieldBegin('config', TType.STRUCT, 2)
      self.config.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.config)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setLogConfig_result:
  """Thrift result struct for the void setLogConfig RPC (no fields)."""

  thrift_spec = (
  )

  def read(self, iprot):
    """Deserialize from *iprot*; all fields are unknown, so everything is skipped."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; emits only the empty struct envelope."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setLogConfig_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No fields at all, so nothing to enforce.
    return

  def __hash__(self):
    value = 17
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getLogConfig_args:
  """Thrift argument struct for the getLogConfig RPC.

  Attributes:
   - name
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
  )

  def __init__(self, name=None,):
    self.name = name

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getLogConfig_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getLogConfig_result:
  """Thrift result struct for the getLogConfig RPC.

  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.STRUCT, 'success', (LogConfig, LogConfig.thrift_spec), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = LogConfig()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getLogConfig_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.success)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class debug_args:
  """Thrift argument struct for the debug RPC.

  Attributes:
   - name
   - component
   - enable
   - samplingPercentage
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRING, 'component', None, None, ), # 2
    (3, TType.BOOL, 'enable', None, None, ), # 3
    (4, TType.DOUBLE, 'samplingPercentage', None, None, ), # 4
  )

  def __init__(self, name=None, component=None, enable=None, samplingPercentage=None,):
    self.name = name
    self.component = component
    self.enable = enable
    self.samplingPercentage = samplingPercentage

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.component = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.BOOL:
          self.enable = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.DOUBLE:
          self.samplingPercentage = iprot.readDouble()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('debug_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 2)
      oprot.writeString(self.component.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.enable is not None:
      oprot.writeFieldBegin('enable', TType.BOOL, 3)
      oprot.writeBool(self.enable)
      oprot.writeFieldEnd()
    if self.samplingPercentage is not None:
      oprot.writeFieldBegin('samplingPercentage', TType.DOUBLE, 4)
      oprot.writeDouble(self.samplingPercentage)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(self.enable)
    value = (value * 31) ^ hash(self.samplingPercentage)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class debug_result:
  """Thrift result struct for the debug RPC (exception carriers only).

  Attributes:
   - e
   - aze
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
  )

  def __init__(self, e=None, aze=None,):
    self.e = e
    self.aze = aze

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('debug_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setWorkerProfiler_args:
  """Thrift argument struct for the setWorkerProfiler RPC.

  Attributes:
   - id
   - profileRequest
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRUCT, 'profileRequest', (ProfileRequest, ProfileRequest.thrift_spec), None, ), # 2
  )

  def __init__(self, id=None, profileRequest=None,):
    # NOTE: 'id' shadows the builtin, but the name is fixed by the IDL/wire contract.
    self.id = id
    self.profileRequest = profileRequest

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fastpath when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.profileRequest = ProfileRequest()
          self.profileRequest.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip so newer peers stay wire-compatible.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C fastbinary fastpath when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setWorkerProfiler_args')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.profileRequest is not None:
      oprot.writeFieldBegin('profileRequest', TType.STRUCT, 2)
      self.profileRequest.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.id)
    value = (value * 31) ^ hash(self.profileRequest)
    return value

  def __repr__(self):
    # dict.items() instead of Python-2-only iteritems() so repr works on Python 3 too.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setWorkerProfiler_result:
  """Thrift-generated result struct for the ``setWorkerProfiler`` RPC (void: no fields)."""

  thrift_spec = (
  )

  def read(self, iprot):
    """Deserialize from ``iprot``; every incoming field is unknown here and gets skipped."""
    accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                   and isinstance(iprot.trans, TTransport.CReadableTransport)
                   and self.thrift_spec is not None
                   and fastbinary is not None)
    if accelerated:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (_fname, field_type, _fid) = iprot.readFieldBegin()
      if field_type == TType.STOP:
        break
      iprot.skip(field_type)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to ``oprot``; emits an empty struct."""
    accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                   and self.thrift_spec is not None
                   and fastbinary is not None)
    if accelerated:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setWorkerProfiler_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Nothing to validate on an empty struct."""
    return

  def __hash__(self):
    # Seed of the generated 31-multiplier hash; no fields to fold in.
    return 17

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    rendered = ['%s=%r' % (name, val)
      for name, val in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getComponentPendingProfileActions_args:
  """
  Thrift-generated argument struct for the ``getComponentPendingProfileActions`` RPC.

  Attributes:
   - id
   - component_id
   - action
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'component_id', None, None, ), # 2
    (3, TType.I32, 'action', None, None, ), # 3
  )

  def __init__(self, id=None, component_id=None, action=None,):
    self.id = id
    self.component_id = component_id
    self.action = action

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.component_id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.action = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getComponentPendingProfileActions_args')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.component_id is not None:
      oprot.writeFieldBegin('component_id', TType.STRING, 2)
      oprot.writeString(self.component_id.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.action is not None:
      oprot.writeFieldBegin('action', TType.I32, 3)
      oprot.writeI32(self.action)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.id)
    value = (value * 31) ^ hash(self.component_id)
    value = (value * 31) ^ hash(self.action)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getComponentPendingProfileActions_result:
  """
  Thrift-generated result struct for the ``getComponentPendingProfileActions`` RPC.

  Attributes:
   - success
  """

  # Field spec consumed by the fastbinary C accelerator; field 0 is the return value:
  # a list of ProfileRequest structs.
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(ProfileRequest, ProfileRequest.thrift_spec)), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Read the element count, then one ProfileRequest per element.
          self.success = []
          (_etype801, _size798) = iprot.readListBegin()
          for _i802 in xrange(_size798):
            _elem803 = ProfileRequest()
            _elem803.read(iprot)
            self.success.append(_elem803)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getComponentPendingProfileActions_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter804 in self.success:
        iter804.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.success)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class uploadNewCredentials_args:
  """
  Thrift-generated argument struct for the ``uploadNewCredentials`` RPC.

  Attributes:
   - name
   - creds
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRUCT, 'creds', (Credentials, Credentials.thrift_spec), None, ), # 2
  )

  def __init__(self, name=None, creds=None,):
    self.name = name
    self.creds = creds

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.creds = Credentials()
          self.creds.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('uploadNewCredentials_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.creds is not None:
      oprot.writeFieldBegin('creds', TType.STRUCT, 2)
      self.creds.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.creds)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class uploadNewCredentials_result:
  """
  Thrift-generated result struct for the ``uploadNewCredentials`` RPC
  (void return; carries only the declared exceptions).

  Attributes:
   - e
   - ite
   - aze
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 3
  )

  def __init__(self, e=None, ite=None, aze=None,):
    self.e = e
    self.ite = ite
    self.aze = aze

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.e = NotAliveException()
          self.e.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.ite = InvalidTopologyException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('uploadNewCredentials_result')
    if self.e is not None:
      oprot.writeFieldBegin('e', TType.STRUCT, 1)
      self.e.write(oprot)
      oprot.writeFieldEnd()
    if self.ite is not None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 2)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 3)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.e)
    value = (value * 31) ^ hash(self.ite)
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class beginCreateBlob_args:
  """
  Thrift-generated argument struct for the ``beginCreateBlob`` RPC.

  Attributes:
   - key
   - meta
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
    (2, TType.STRUCT, 'meta', (SettableBlobMeta, SettableBlobMeta.thrift_spec), None, ), # 2
  )

  def __init__(self, key=None, meta=None,):
    self.key = key
    self.meta = meta

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.meta = SettableBlobMeta()
          self.meta.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginCreateBlob_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.meta is not None:
      oprot.writeFieldBegin('meta', TType.STRUCT, 2)
      self.meta.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.key)
    value = (value * 31) ^ hash(self.meta)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class beginCreateBlob_result:
  """
  Thrift-generated result struct for the ``beginCreateBlob`` RPC.

  Attributes:
   - success
   - aze
   - kae
  """

  # Field spec consumed by the fastbinary C accelerator; field 0 is the return value.
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'kae', (KeyAlreadyExistsException, KeyAlreadyExistsException.thrift_spec), None, ), # 2
  )

  def __init__(self, success=None, aze=None, kae=None,):
    self.success = success
    self.aze = aze
    self.kae = kae

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.kae = KeyAlreadyExistsException()
          self.kae.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginCreateBlob_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.kae is not None:
      oprot.writeFieldBegin('kae', TType.STRUCT, 2)
      self.kae.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.kae)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class beginUpdateBlob_args:
  """
  Thrift-generated argument struct for the ``beginUpdateBlob`` RPC.

  Attributes:
   - key
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )

  def __init__(self, key=None,):
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginUpdateBlob_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class beginUpdateBlob_result:
  """
  Thrift-generated result struct for the ``beginUpdateBlob`` RPC.

  Attributes:
   - success
   - aze
   - knf
  """

  # Field spec consumed by the fastbinary C accelerator; field 0 is the return value.
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )

  def __init__(self, success=None, aze=None, knf=None,):
    self.success = success
    self.aze = aze
    self.knf = knf

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginUpdateBlob_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class uploadBlobChunk_args:
  """
  Thrift-generated argument struct for the ``uploadBlobChunk`` RPC.

  Attributes:
   - session
   - chunk
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'session', None, None, ), # 1
    (2, TType.STRING, 'chunk', None, None, ), # 2
  )

  def __init__(self, session=None, chunk=None,):
    self.session = session
    self.chunk = chunk

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.session = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          # Unlike `session`, `chunk` is kept as raw bytes: no utf-8 decode.
          self.chunk = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('uploadBlobChunk_args')
    if self.session is not None:
      oprot.writeFieldBegin('session', TType.STRING, 1)
      oprot.writeString(self.session.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.chunk is not None:
      oprot.writeFieldBegin('chunk', TType.STRING, 2)
      # Raw binary payload: written without encoding.
      oprot.writeString(self.chunk)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.session)
    value = (value * 31) ^ hash(self.chunk)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class uploadBlobChunk_result:
  """
  Thrift-generated result struct for the ``uploadBlobChunk`` RPC
  (void return; carries only the declared exception).

  Attributes:
   - aze
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
  )

  def __init__(self, aze=None,):
    self.aze = aze

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('uploadBlobChunk_result')
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class finishBlobUpload_args:
  """
  Thrift-generated argument struct for the ``finishBlobUpload`` RPC.

  Attributes:
   - session
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'session', None, None, ), # 1
  )

  def __init__(self, session=None,):
    self.session = session

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.session = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('finishBlobUpload_args')
    if self.session is not None:
      oprot.writeFieldBegin('session', TType.STRING, 1)
      oprot.writeString(self.session.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.session)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class finishBlobUpload_result:
  """
  Thrift-generated result struct for the ``finishBlobUpload`` RPC
  (void return; carries only the declared exception).

  Attributes:
   - aze
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
  )

  def __init__(self, aze=None,):
    self.aze = aze

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('finishBlobUpload_result')
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class cancelBlobUpload_args:
  """
  Thrift-generated argument struct for the ``cancelBlobUpload`` RPC.

  Attributes:
   - session
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'session', None, None, ), # 1
  )

  def __init__(self, session=None,):
    self.session = session

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.session = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('cancelBlobUpload_args')
    if self.session is not None:
      oprot.writeFieldBegin('session', TType.STRING, 1)
      oprot.writeString(self.session.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.session)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class cancelBlobUpload_result:
  """
  Thrift-generated result struct for the ``cancelBlobUpload`` RPC
  (void return; carries only the declared exception).

  Attributes:
   - aze
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
  )

  def __init__(self, aze=None,):
    self.aze = aze

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('cancelBlobUpload_result')
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.aze)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getBlobMeta_args:
  """
  Thrift-generated argument struct for the ``getBlobMeta`` RPC.

  Attributes:
   - key
  """

  # Field spec consumed by the fastbinary C accelerator; tuple index == thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )

  def __init__(self, key=None,):
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from ``iprot`` (C fast path when the accelerated binary protocol is in use)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot`` (C fast path when the accelerated binary protocol is in use)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlobMeta_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Generated no-op validation hook."""
    return

  def __hash__(self):
    # Generated 31-multiplier rolling hash over the fields.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value

  def __repr__(self):
    # NOTE: iteritems is Python 2 only; this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getBlobMeta_result:
  """
  Auto-generated Thrift result struct for the getBlobMeta RPC; at most one
  field is expected to be set by the server side.

  Attributes:
   - success: ReadableBlobMeta payload on success
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.STRUCT, 'success', (ReadableBlobMeta, ReadableBlobMeta.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, success=None, aze=None, knf=None,):
    self.success = success
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = ReadableBlobMeta()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlobMeta_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class setBlobMeta_args:
  """
  Auto-generated Thrift argument struct for the setBlobMeta RPC.

  Attributes:
   - key: blob key to update (transported as a UTF-8 string)
   - meta: SettableBlobMeta struct carrying the new metadata
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
    (2, TType.STRUCT, 'meta', (SettableBlobMeta, SettableBlobMeta.thrift_spec), None, ), # 2
  )
  def __init__(self, key=None, meta=None,):
    self.key = key
    self.meta = meta
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.meta = SettableBlobMeta()
          self.meta.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setBlobMeta_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.meta is not None:
      oprot.writeFieldBegin('meta', TType.STRUCT, 2)
      self.meta.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    value = (value * 31) ^ hash(self.meta)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class setBlobMeta_result:
  """
  Auto-generated Thrift result struct for the setBlobMeta RPC (void return);
  only the exception fields may be set.

  Attributes:
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, aze=None, knf=None,):
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setBlobMeta_result')
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class beginBlobDownload_args:
  """
  Auto-generated Thrift argument struct for the beginBlobDownload RPC.

  Attributes:
   - key: blob key to start downloading (transported as a UTF-8 string)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginBlobDownload_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class beginBlobDownload_result:
  """
  Auto-generated Thrift result struct for the beginBlobDownload RPC; at most
  one field is expected to be set by the server side.

  Attributes:
   - success: BeginDownloadResult payload on success
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.STRUCT, 'success', (BeginDownloadResult, BeginDownloadResult.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, success=None, aze=None, knf=None,):
    self.success = success
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = BeginDownloadResult()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('beginBlobDownload_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class downloadBlobChunk_args:
  """
  Auto-generated Thrift argument struct for the downloadBlobChunk RPC.

  Attributes:
   - session: download session id obtained earlier (transported as a UTF-8
     string; presumably from beginBlobDownload -- confirm against the service)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'session', None, None, ), # 1
  )
  def __init__(self, session=None,):
    self.session = session
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.session = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('downloadBlobChunk_args')
    if self.session is not None:
      oprot.writeFieldBegin('session', TType.STRING, 1)
      oprot.writeString(self.session.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.session)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class downloadBlobChunk_result:
  """
  Auto-generated Thrift result struct for the downloadBlobChunk RPC.

  Attributes:
   - success: chunk payload; read/written without utf-8 decode/encode, i.e.
     raw bytes (Thrift ``binary``), unlike the UTF-8 string fields elsewhere
   - aze: AuthorizationException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, aze=None,):
    self.success = success
    self.aze = aze
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          # Note: no .decode('utf-8') here -- the chunk stays as raw bytes.
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('downloadBlobChunk_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class deleteBlob_args:
  """
  Auto-generated Thrift argument struct for the deleteBlob RPC.

  Attributes:
   - key: blob key to delete (transported as a UTF-8 string)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('deleteBlob_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class deleteBlob_result:
  """
  Auto-generated Thrift result struct for the deleteBlob RPC (void return);
  only the exception fields may be set.

  Attributes:
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, aze=None, knf=None,):
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('deleteBlob_result')
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class listBlobs_args:
  """
  Auto-generated Thrift argument struct for the listBlobs RPC.

  Attributes:
   - session: pagination/listing session token (transported as a UTF-8 string)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'session', None, None, ), # 1
  )
  def __init__(self, session=None,):
    self.session = session
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.session = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('listBlobs_args')
    if self.session is not None:
      oprot.writeFieldBegin('session', TType.STRING, 1)
      oprot.writeString(self.session.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.session)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class listBlobs_result:
  """
  Auto-generated Thrift result struct for the listBlobs RPC (no declared
  exceptions).

  Attributes:
   - success: ListBlobsResult payload on success
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.STRUCT, 'success', (ListBlobsResult, ListBlobsResult.thrift_spec), None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = ListBlobsResult()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('listBlobs_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getBlobReplication_args:
  """
  Auto-generated Thrift argument struct for the getBlobReplication RPC.

  Attributes:
   - key: blob key whose replication count is queried (UTF-8 string)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlobReplication_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getBlobReplication_result:
  """
  Auto-generated Thrift result struct for the getBlobReplication RPC; at most
  one field is expected to be set by the server side.

  Attributes:
   - success: i32 replication count on success
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, success=None, aze=None, knf=None,):
    self.success = success
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlobReplication_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateBlobReplication_args:
  """
  Auto-generated Thrift argument struct for the updateBlobReplication RPC.

  Attributes:
   - key: blob key to update (transported as a UTF-8 string)
   - replication: desired replication count (i32)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
    (2, TType.I32, 'replication', None, None, ), # 2
  )
  def __init__(self, key=None, replication=None,):
    self.key = key
    self.replication = replication
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.replication = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateBlobReplication_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.replication is not None:
      oprot.writeFieldBegin('replication', TType.I32, 2)
      oprot.writeI32(self.replication)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    value = (value * 31) ^ hash(self.replication)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateBlobReplication_result:
  """
  Auto-generated Thrift result struct for the updateBlobReplication RPC; at
  most one field is expected to be set by the server side.

  Attributes:
   - success: i32 resulting replication count on success
   - aze: AuthorizationException reported by the call
   - knf: KeyNotFoundException reported by the call
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'knf', (KeyNotFoundException, KeyNotFoundException.thrift_spec), None, ), # 2
  )
  def __init__(self, success=None, aze=None, knf=None,):
    self.success = success
    self.aze = aze
    self.knf = knf
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.aze = AuthorizationException()
          self.aze.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.knf = KeyNotFoundException()
          self.knf.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateBlobReplication_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    if self.aze is not None:
      oprot.writeFieldBegin('aze', TType.STRUCT, 1)
      self.aze.write(oprot)
      oprot.writeFieldEnd()
    if self.knf is not None:
      oprot.writeFieldBegin('knf', TType.STRUCT, 2)
      self.knf.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.success)
    value = (value * 31) ^ hash(self.aze)
    value = (value * 31) ^ hash(self.knf)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class createStateInZookeeper_args:
  """
  Auto-generated Thrift argument struct for the createStateInZookeeper RPC.

  Attributes:
   - key: state key to create (transported as a UTF-8 string)
  """
  # Field metadata consumed by the fastbinary C codec:
  # (field id, type, name, type args, default value)
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    """Populate this struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: walk fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to protocol *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('createStateInZookeeper_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # Accumulating polynomial hash over the field values.
    value = 17
    value = (value * 31) ^ hash(self.key)
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class createStateInZookeeper_result:
  """
  Auto-generated, empty Thrift result struct for the createStateInZookeeper
  RPC: no return value and no declared exception fields.
  """
  # No fields -- empty spec tuple.
  thrift_spec = (
  )
  def read(self, iprot):
    """Consume this (empty) struct from protocol *iprot*."""
    # Fast path: decode via the C extension for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: every field is unknown here, so skip until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this (empty) struct to protocol *oprot*."""
    # Fast path: encode via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('createStateInZookeeper_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required-field checks generated for this struct.
    return
  def __hash__(self):
    # No fields: constant seed hash.
    value = 17
    return value
  def __repr__(self):
    # Python 2 dict iteration (generated for py2).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class beginFileUpload_args:
    """Empty argument struct for the beginFileUpload RPC."""

    thrift_spec = ()

    def read(self, iprot):
        """Consume one field-free struct from *iprot*, skipping unknown fields."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport))
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this field-free struct onto *oprot*."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('beginFileUpload_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __hash__(self):
        # Seed-only hash: no fields contribute.
        return 17

    def __repr__(self):
        fields = ', '.join('%s=%r' % kv for kv in self.__dict__.iteritems())
        return '%s(%s)' % (self.__class__.__name__, fields)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class beginFileUpload_result:
    """
    Result struct for the beginFileUpload RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRING, 'success', None, None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('beginFileUpload_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class uploadChunk_args:
    """
    Argument struct for the uploadChunk RPC (generated Thrift code).

    Attributes:
     - location
     - chunk
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'location', None, None, ), # 1
        (2, TType.STRING, 'chunk', None, None, ), # 2
    )

    def __init__(self, location=None, chunk=None,):
        self.location = location
        self.chunk = chunk

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.location = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Raw bytes: deliberately NOT utf-8 decoded (binary payload).
                    self.chunk = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('uploadChunk_args')
        if self.location is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('location', TType.STRING, 1)
            oprot.writeString(self.location.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.chunk is not None:
            oprot.writeFieldBegin('chunk', TType.STRING, 2)
            # Raw bytes: written without encoding (binary payload).
            oprot.writeString(self.chunk)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.location)
        value = (value * 31) ^ hash(self.chunk)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class uploadChunk_result:
    """
    Result struct for the uploadChunk RPC (generated Thrift code).

    Attributes:
     - aze
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, aze=None,):
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('uploadChunk_result')
        if self.aze is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class finishFileUpload_args:
    """
    Argument struct for the finishFileUpload RPC (generated Thrift code).

    Attributes:
     - location
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'location', None, None, ), # 1
    )

    def __init__(self, location=None,):
        self.location = location

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.location = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('finishFileUpload_args')
        if self.location is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('location', TType.STRING, 1)
            oprot.writeString(self.location.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.location)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class finishFileUpload_result:
    """
    Result struct for the finishFileUpload RPC (generated Thrift code).

    Attributes:
     - aze
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, aze=None,):
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('finishFileUpload_result')
        if self.aze is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class beginFileDownload_args:
    """
    Argument struct for the beginFileDownload RPC (generated Thrift code).

    Attributes:
     - file
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'file', None, None, ), # 1
    )

    def __init__(self, file=None,):
        self.file = file

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.file = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('beginFileDownload_args')
        if self.file is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('file', TType.STRING, 1)
            oprot.writeString(self.file.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.file)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class beginFileDownload_result:
    """
    Result struct for the beginFileDownload RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRING, 'success', None, None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('beginFileDownload_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class downloadChunk_args:
    """
    Argument struct for the downloadChunk RPC (generated Thrift code).

    Attributes:
     - id
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'id', None, None, ), # 1
    )

    def __init__(self, id=None,):
        self.id = id

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('downloadChunk_args')
        if self.id is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.id)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class downloadChunk_result:
    """
    Result struct for the downloadChunk RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRING, 'success', None, None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    # Raw bytes: deliberately NOT utf-8 decoded (binary chunk payload).
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('downloadChunk_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRING, 0)
            # Raw bytes: written without encoding (binary chunk payload).
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getNimbusConf_args:
    """Empty argument struct for the getNimbusConf RPC."""

    thrift_spec = ()

    def read(self, iprot):
        """Consume one field-free struct from *iprot*, skipping unknown fields."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport))
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this field-free struct onto *oprot*."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getNimbusConf_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __hash__(self):
        # Seed-only hash: no fields contribute.
        return 17

    def __repr__(self):
        fields = ', '.join('%s=%r' % kv for kv in self.__dict__.iteritems())
        return '%s(%s)' % (self.__class__.__name__, fields)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class getNimbusConf_result:
    """
    Result struct for the getNimbusConf RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRING, 'success', None, None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getNimbusConf_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success.encode('utf-8'))
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getClusterInfo_args:
    """Empty argument struct for the getClusterInfo RPC."""

    thrift_spec = ()

    def read(self, iprot):
        """Consume one field-free struct from *iprot*, skipping unknown fields."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport))
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this field-free struct onto *oprot*."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getClusterInfo_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __hash__(self):
        # Seed-only hash: no fields contribute.
        return 17

    def __repr__(self):
        fields = ', '.join('%s=%r' % kv for kv in self.__dict__.iteritems())
        return '%s(%s)' % (self.__class__.__name__, fields)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class getClusterInfo_result:
    """
    Result struct for the getClusterInfo RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRUCT, 'success', (ClusterSummary, ClusterSummary.thrift_spec), None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = ClusterSummary()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getClusterInfo_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getLeader_args:
    """Empty argument struct for the getLeader RPC."""

    thrift_spec = ()

    def read(self, iprot):
        """Consume one field-free struct from *iprot*, skipping unknown fields."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport))
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            _, ftype, _ = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this field-free struct onto *oprot*."""
        accelerated = (fastbinary is not None
                       and self.thrift_spec is not None
                       and oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getLeader_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate: the struct has no required fields."""
        return

    def __hash__(self):
        # Seed-only hash: no fields contribute.
        return 17

    def __repr__(self):
        fields = ', '.join('%s=%r' % kv for kv in self.__dict__.iteritems())
        return '%s(%s)' % (self.__class__.__name__, fields)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class getLeader_result:
    """
    Result struct for the getLeader RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.STRUCT, 'success', (NimbusSummary, NimbusSummary.thrift_spec), None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = NimbusSummary()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getLeader_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class isTopologyNameAllowed_args:
    """
    Argument struct for the isTopologyNameAllowed RPC (generated Thrift code).

    Attributes:
     - name
    """

    # (field id, thrift type, name, type args, default); tuple index == field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'name', None, None, ), # 1
    )

    def __init__(self, name=None,):
        self.name = name

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('isTopologyNameAllowed_args')
        if self.name is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.name)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class isTopologyNameAllowed_result:
    """
    Result struct for the isTopologyNameAllowed RPC (generated Thrift code).

    Attributes:
     - success
     - aze
    """

    # (field id, thrift type, name, type args, default); field 0 is the return value.
    thrift_spec = (
        (0, TType.BOOL, 'success', None, None, ), # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated binary decode when supported.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Generic path: read recognised field ids, skip everything else.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated binary encode when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('isTopologyNameAllowed_result')
        if self.success is not None:  # unset fields are omitted from the wire
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # 31-prime rolling hash over the struct's fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        # Python 2 dict API (iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getTopologyInfo_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfo_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TopologyInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfoWithOpts_args:
"""
Attributes:
- id
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRUCT, 'options', (GetInfoOptions, GetInfoOptions.thrift_spec), None, ), # 2
)
def __init__(self, id=None, options=None,):
self.id = id
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.options = GetInfoOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfoWithOpts_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 2)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.options)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfoWithOpts_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TopologyInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfoWithOpts_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyPageInfo_args:
"""
Attributes:
- id
- window
- is_include_sys
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRING, 'window', None, None, ), # 2
(3, TType.BOOL, 'is_include_sys', None, None, ), # 3
)
def __init__(self, id=None, window=None, is_include_sys=None,):
self.id = id
self.window = window
self.is_include_sys = is_include_sys
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.window = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.is_include_sys = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyPageInfo_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.window is not None:
oprot.writeFieldBegin('window', TType.STRING, 2)
oprot.writeString(self.window.encode('utf-8'))
oprot.writeFieldEnd()
if self.is_include_sys is not None:
oprot.writeFieldBegin('is_include_sys', TType.BOOL, 3)
oprot.writeBool(self.is_include_sys)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.window)
value = (value * 31) ^ hash(self.is_include_sys)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyPageInfo_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TopologyPageInfo, TopologyPageInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TopologyPageInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyPageInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getSupervisorPageInfo_args:
"""
Attributes:
- id
- host
- is_include_sys
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRING, 'host', None, None, ), # 2
(3, TType.BOOL, 'is_include_sys', None, None, ), # 3
)
def __init__(self, id=None, host=None, is_include_sys=None,):
self.id = id
self.host = host
self.is_include_sys = is_include_sys
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.host = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.is_include_sys = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getSupervisorPageInfo_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.host is not None:
oprot.writeFieldBegin('host', TType.STRING, 2)
oprot.writeString(self.host.encode('utf-8'))
oprot.writeFieldEnd()
if self.is_include_sys is not None:
oprot.writeFieldBegin('is_include_sys', TType.BOOL, 3)
oprot.writeBool(self.is_include_sys)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.host)
value = (value * 31) ^ hash(self.is_include_sys)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getSupervisorPageInfo_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (SupervisorPageInfo, SupervisorPageInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = SupervisorPageInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getSupervisorPageInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getComponentPageInfo_args:
"""
Attributes:
- topology_id
- component_id
- window
- is_include_sys
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'topology_id', None, None, ), # 1
(2, TType.STRING, 'component_id', None, None, ), # 2
(3, TType.STRING, 'window', None, None, ), # 3
(4, TType.BOOL, 'is_include_sys', None, None, ), # 4
)
def __init__(self, topology_id=None, component_id=None, window=None, is_include_sys=None,):
self.topology_id = topology_id
self.component_id = component_id
self.window = window
self.is_include_sys = is_include_sys
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.topology_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.component_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.window = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.is_include_sys = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getComponentPageInfo_args')
if self.topology_id is not None:
oprot.writeFieldBegin('topology_id', TType.STRING, 1)
oprot.writeString(self.topology_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.component_id is not None:
oprot.writeFieldBegin('component_id', TType.STRING, 2)
oprot.writeString(self.component_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.window is not None:
oprot.writeFieldBegin('window', TType.STRING, 3)
oprot.writeString(self.window.encode('utf-8'))
oprot.writeFieldEnd()
if self.is_include_sys is not None:
oprot.writeFieldBegin('is_include_sys', TType.BOOL, 4)
oprot.writeBool(self.is_include_sys)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.topology_id)
value = (value * 31) ^ hash(self.component_id)
value = (value * 31) ^ hash(self.window)
value = (value * 31) ^ hash(self.is_include_sys)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getComponentPageInfo_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ComponentPageInfo, ComponentPageInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ComponentPageInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getComponentPageInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8'))
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopology_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopology_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Thrift-generated result struct for getTopology(): field 0 carries the return
# value, fields 1-2 the declared exceptions. Do not edit by hand.
class getTopology_result:
    """
    Attributes:
     - success
     - e
     - aze
    """

    # (field id, type, name, type args, default) per slot; index == field id.
    thrift_spec = (
        (0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 2
    )

    def __init__(self, success=None, e=None, aze=None,):
        self.success = success
        self.e = e
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol
        # and the fastbinary extension are both available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown or mistyped fields
        # are skipped for forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = StormTopology()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = NotAliveException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit each non-None field in order.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getTopology_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 2)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over all fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.e)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated argument struct for getUserTopology(id). Do not edit by hand.
class getUserTopology_args:
    """
    Attributes:
     - id
    """

    # Slot 0 is unused so tuple index == field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None, ),  # 1
    )

    def __init__(self, id=None,):
        self.id = id

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit the non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getUserTopology_args')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over the single field.
        value = 17
        value = (value * 31) ^ hash(self.id)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated result struct for getUserTopology(): field 0 is the return
# value, fields 1-2 the declared exceptions. Do not edit by hand.
class getUserTopology_result:
    """
    Attributes:
     - success
     - e
     - aze
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 2
    )

    def __init__(self, success=None, e=None, aze=None,):
        self.success = success
        self.e = e
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = StormTopology()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = NotAliveException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit each non-None field in order.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getUserTopology_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 2)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over all fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.e)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated argument struct for getTopologyHistory(user). Do not edit by hand.
class getTopologyHistory_args:
    """
    Attributes:
     - user
    """

    # Slot 0 is unused so tuple index == field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'user', None, None, ),  # 1
    )

    def __init__(self, user=None,):
        self.user = user

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.user = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit the non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getTopologyHistory_args')
        if self.user is not None:
            oprot.writeFieldBegin('user', TType.STRING, 1)
            oprot.writeString(self.user.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over the single field.
        value = 17
        value = (value * 31) ^ hash(self.user)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated result struct for getTopologyHistory(): field 0 is the
# return value, field 1 the declared exception. Do not edit by hand.
class getTopologyHistory_result:
    """
    Attributes:
     - success
     - aze
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (TopologyHistoryInfo, TopologyHistoryInfo.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TopologyHistoryInfo()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit each non-None field in order.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getTopologyHistory_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over all fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated argument struct for getOwnerResourceSummaries(owner).
# Do not edit by hand.
class getOwnerResourceSummaries_args:
    """
    Attributes:
     - owner
    """

    # Slot 0 is unused so tuple index == field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'owner', None, None, ),  # 1
    )

    def __init__(self, owner=None,):
        self.owner = owner

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.owner = iprot.readString().decode('utf-8')
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit the non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getOwnerResourceSummaries_args')
        if self.owner is not None:
            oprot.writeFieldBegin('owner', TType.STRING, 1)
            oprot.writeString(self.owner.encode('utf-8'))
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over the single field.
        value = 17
        value = (value * 31) ^ hash(self.owner)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated result struct for getOwnerResourceSummaries(): field 0 is a
# list of OwnerResourceSummary, field 1 the declared exception. Do not edit by hand.
class getOwnerResourceSummaries_result:
    """
    Attributes:
     - success
     - aze
    """

    thrift_spec = (
        (0, TType.LIST, 'success', (TType.STRUCT,(OwnerResourceSummary, OwnerResourceSummary.thrift_spec)), None, ),  # 0
        (1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # Decode the list element-by-element (generator-named temps).
                    self.success = []
                    (_etype808, _size805) = iprot.readListBegin()
                    for _i809 in xrange(_size805):
                        _elem810 = OwnerResourceSummary()
                        _elem810.read(iprot)
                        self.success.append(_elem810)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit each non-None field in order.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getOwnerResourceSummaries_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter811 in self.success:
                iter811.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over all fields.
        value = 17
        value = (value * 31) ^ hash(self.success)
        value = (value * 31) ^ hash(self.aze)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Thrift-generated argument struct for processWorkerMetrics(metrics).
# Do not edit by hand.
class processWorkerMetrics_args:
    """
    Attributes:
     - metrics
    """

    # Slot 0 is unused so tuple index == field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'metrics', (WorkerMetrics, WorkerMetrics.thrift_spec), None, ),  # 1
    )

    def __init__(self, metrics=None,):
        self.metrics = metrics

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.metrics = WorkerMetrics()
                    self.metrics.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); otherwise emit the non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('processWorkerMetrics_args')
        if self.metrics is not None:
            oprot.writeFieldBegin('metrics', TType.STRUCT, 1)
            self.metrics.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __hash__(self):
        # 17/31 polynomial hash over the single field.
        value = 17
        value = (value * 31) ^ hash(self.metrics)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class processWorkerMetrics_result:
    """Thrift result struct for the void ``processWorkerMetrics`` call.

    It carries no return value and declares no exceptions, so the wire
    representation is an empty struct.
    """

    # Empty spec: no fields at all.
    thrift_spec = (
    )

    def read(self, iprot):
        """Consume a (necessarily empty) result struct from ``iprot``."""
        accelerated = (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        )
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, _) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            # No fields are declared, so anything encountered is skipped.
            iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit an empty result struct to ``oprot``."""
        accelerated = (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        )
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('processWorkerMetrics_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No fields, nothing to validate."""
        return

    def __hash__(self):
        # Seed of the generator's 17/31 scheme; no fields to mix in.
        return 17

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.iteritems())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| {
"content_hash": "f04e1503167e4748cb6021db6beff077",
"timestamp": "",
"source": "github",
"line_count": 10272,
"max_line_length": 188,
"avg_line_length": 30.737344236760123,
"alnum_prop": 0.6402288001925671,
"repo_name": "srishtyagrawal/storm",
"id": "5f9f324c58ccb9d6f2de6ff2b1de2056cc7d4802",
"size": "316692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storm-client/src/py/storm/Nimbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "53621"
},
{
"name": "CSS",
"bytes": "12597"
},
{
"name": "Clojure",
"bytes": "521695"
},
{
"name": "Fancy",
"bytes": "6234"
},
{
"name": "FreeMarker",
"bytes": "3512"
},
{
"name": "HTML",
"bytes": "187245"
},
{
"name": "Java",
"bytes": "10953815"
},
{
"name": "JavaScript",
"bytes": "74069"
},
{
"name": "M4",
"bytes": "1522"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "PowerShell",
"bytes": "3405"
},
{
"name": "Python",
"bytes": "888842"
},
{
"name": "Ruby",
"bytes": "15777"
},
{
"name": "Shell",
"bytes": "23696"
},
{
"name": "Thrift",
"bytes": "28093"
},
{
"name": "XSLT",
"bytes": "1365"
}
],
"symlink_target": ""
} |
# Smoke-test script: load the Turhouse configuration and fire a test
# notification through the event subsystem.
from glob import *  # NOTE(review): presumably a project-local `glob` module providing `Glob`, not the stdlib — confirm
import turhouseconfig

from event import notification

# Empty path — TurhouseConfig presumably falls back to a default location; TODO confirm
configfile = ''
Glob.config = turhouseconfig.TurhouseConfig(configfile)

notification.ezs_notification('Test: ', 'Test Device', 'Test event')
| {
"content_hash": "1f40d3b0268a3ca7e59f9da69a2a9b10",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.7897196261682243,
"repo_name": "janredl/turhouse",
"id": "6d4a7ba5f4afc160c31e55e9b72e5d8338947eb4",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45949"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import pandas as pd
from . import ProgressiveTest
from progressivis.core import aio, Sink
from progressivis import Print
from progressivis.table import Table
from progressivis.table.cmp_query import CmpQueryLast
from progressivis.table.constant import Constant
from progressivis.stats import RandomTable
from progressivis.table.stirrer import Stirrer
from progressivis.core.bitmap import bitmap
from typing import Any
class TestCmpQuery(ProgressiveTest):
    """Checks that CmpQueryLast selects exactly the rows whose column ``_1``
    is smaller than the value in the last row of the comparison table."""

    def _run_cmp_query(self, stirrer_kw: Any = None) -> None:
        """Build the scheduler pipeline, run it, and compare the module's
        bitmap against the selection recomputed with pandas.

        stirrer_kw: when not None, a Stirrer configured with these keyword
        arguments is inserted between the random source and the comparison
        module (an empty dict still inserts a default-configured Stirrer).
        """
        s = self.scheduler()
        random = RandomTable(10, rows=10000, scheduler=s)
        source = random
        if stirrer_kw is not None:
            source = Stirrer(
                update_column="_1", fixed_step_size=100, scheduler=s, **stirrer_kw
            )
            source.input[0] = random.output.result
        cmp_ = CmpQueryLast(scheduler=s)
        sink = Sink(name="sink", scheduler=s)
        sink.input.inp = cmp_.output.result
        cst = Table("cmp_table", data={"_1": [0.5]})
        value = Constant(cst, scheduler=s)
        cmp_.input.cmp = value.output.result
        cmp_.input[0] = source.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = cmp_.output.select
        aio.run(s.start())
        # Recompute the expected selection with pandas and compare bitmaps.
        tbl = cmp_.get_input_slot("table").data()
        df = pd.DataFrame(tbl.to_dict(), index=tbl.index.to_array())
        dfe = df.eval("_1<0.5")
        self.assertEqual(cmp_._bitmap, bitmap(df.index[dfe]))

    def test_cmp_query(self) -> None:
        # Plain append-only input, no stirring.
        self._run_cmp_query()

    def t_cmp_query_impl(self, **kw: Any) -> None:
        # Backward-compatible entry point used by the tests below;
        # kw configures the inserted Stirrer.
        self._run_cmp_query(stirrer_kw=kw)

    def test_cmp_query2(self) -> None:
        # Input table with row deletions.
        return self.t_cmp_query_impl(delete_rows=5)

    def test_cmp_query3(self) -> None:
        # Input table with row updates.
        return self.t_cmp_query_impl(update_rows=5)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    ProgressiveTest.main()
| {
"content_hash": "18c1e55d54c73d13e7aa3029b6efd85b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 85,
"avg_line_length": 37.07575757575758,
"alnum_prop": 0.6322026971802207,
"repo_name": "jdfekete/progressivis",
"id": "d5576c0de38ddc2f0eb4633de5e1e66cdfb32742",
"size": "2447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_03_cmp_query.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C++",
"bytes": "13874"
},
{
"name": "CSS",
"bytes": "20470"
},
{
"name": "Cython",
"bytes": "1747"
},
{
"name": "HTML",
"bytes": "34701"
},
{
"name": "JavaScript",
"bytes": "305156"
},
{
"name": "Jupyter Notebook",
"bytes": "277333"
},
{
"name": "Python",
"bytes": "1812925"
},
{
"name": "Shell",
"bytes": "905"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import Mock, patch
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.tableau.hooks.tableau import TableauJobFinishCode
from airflow.providers.tableau.operators.tableau import TableauOperator
class TestTableauOperator(unittest.TestCase):
    """
    Test class for TableauOperator
    """

    def setUp(self):
        """
        Create three mock workbooks (wb_0..wb_2), three mock datasources
        (ds_0..ds_2) and the operator kwargs shared by every test.
        """
        self.mocked_workbooks = []
        self.mock_datasources = []
        for i in range(3):
            mock_workbook = Mock()
            mock_workbook.id = i
            mock_workbook.name = f'wb_{i}'
            self.mocked_workbooks.append(mock_workbook)

            mock_datasource = Mock()
            mock_datasource.id = i
            mock_datasource.name = f'ds_{i}'
            self.mock_datasources.append(mock_datasource)

        self.kwargs = {
            'site_id': 'test_site',
            'task_id': 'task',
            'dag': None,
            'match_with': 'name',
            'method': 'refresh',
        }

    @staticmethod
    def _prepare_hook(mock_tableau_hook, resources):
        """
        Common hook wiring: `get_all` yields `resources` and the mocked
        hook acts as its own context manager.
        """
        mock_tableau_hook.get_all = Mock(return_value=resources)
        mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_workbooks(self, mock_tableau_hook):
        """
        Test Execute Workbooks
        """
        self._prepare_hook(mock_tableau_hook, self.mocked_workbooks)
        operator = TableauOperator(blocking_refresh=False, find='wb_2', resource='workbooks', **self.kwargs)

        job_id = operator.execute(context={})

        mock_tableau_hook.server.workbooks.refresh.assert_called_once_with(2)
        assert mock_tableau_hook.server.workbooks.refresh.return_value.id == job_id

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_workbooks_blocking(self, mock_tableau_hook):
        """
        Test execute workbooks blocking
        """
        self._prepare_hook(mock_tableau_hook, self.mocked_workbooks)
        # Report immediate success so the blocking wait returns.
        mock_tableau_hook.server.jobs.get_by_id = Mock(
            return_value=Mock(finish_code=TableauJobFinishCode.SUCCESS.value)
        )
        operator = TableauOperator(find='wb_2', resource='workbooks', **self.kwargs)

        job_id = operator.execute(context={})

        mock_tableau_hook.server.workbooks.refresh.assert_called_once_with(2)
        assert mock_tableau_hook.server.workbooks.refresh.return_value.id == job_id
        mock_tableau_hook.wait_for_state.assert_called_once_with(
            job_id=job_id, check_interval=20, target_state=TableauJobFinishCode.SUCCESS
        )

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_missing_workbook(self, mock_tableau_hook):
        """
        Test execute missing workbook
        """
        self._prepare_hook(mock_tableau_hook, self.mocked_workbooks)
        operator = TableauOperator(find='test', resource='workbooks', **self.kwargs)

        with pytest.raises(AirflowException):
            operator.execute({})

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_datasources(self, mock_tableau_hook):
        """
        Test Execute datasources
        """
        self._prepare_hook(mock_tableau_hook, self.mock_datasources)
        operator = TableauOperator(blocking_refresh=False, find='ds_2', resource='datasources', **self.kwargs)

        job_id = operator.execute(context={})

        mock_tableau_hook.server.datasources.refresh.assert_called_once_with(2)
        assert mock_tableau_hook.server.datasources.refresh.return_value.id == job_id

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_datasources_blocking(self, mock_tableau_hook):
        """
        Test execute datasources blocking
        """
        self._prepare_hook(mock_tableau_hook, self.mock_datasources)
        operator = TableauOperator(find='ds_2', resource='datasources', **self.kwargs)

        job_id = operator.execute(context={})

        mock_tableau_hook.server.datasources.refresh.assert_called_once_with(2)
        assert mock_tableau_hook.server.datasources.refresh.return_value.id == job_id
        mock_tableau_hook.wait_for_state.assert_called_once_with(
            job_id=job_id, check_interval=20, target_state=TableauJobFinishCode.SUCCESS
        )

    @patch('airflow.providers.tableau.operators.tableau.TableauHook')
    def test_execute_missing_datasource(self, mock_tableau_hook):
        """
        Test execute missing datasource
        """
        self._prepare_hook(mock_tableau_hook, self.mock_datasources)
        operator = TableauOperator(find='test', resource='datasources', **self.kwargs)

        with pytest.raises(AirflowException):
            operator.execute({})

    def test_execute_unavailable_resource(self):
        """
        Test execute unavailable resource
        """
        operator = TableauOperator(resource='test', find='test', **self.kwargs)

        with pytest.raises(AirflowException):
            operator.execute({})

    def test_get_resource_id(self):
        """
        Test get resource id
        """
        resource_id = 'res_id'
        operator = TableauOperator(resource='task', find=resource_id, method='run', task_id='t', dag=None)
        assert operator._get_resource_id(resource_id) == resource_id
| {
"content_hash": "02bdf06886e7d32de337c7c650247c21",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 110,
"avg_line_length": 39.45945945945946,
"alnum_prop": 0.6525684931506849,
"repo_name": "dhuang/incubator-airflow",
"id": "4d8e20f5e02284e355e55531cce234f7d95a9dd9",
"size": "6626",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/tableau/operators/test_tableau.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
import logging

log = logging.getLogger(__name__)

# Optional keras dependency: neural-network models are only offered when the
# import succeeds; `import_keras` records the outcome for the rest of the module.
try:
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU, LeakyReLU
    from keras.optimizers import Adagrad, Adadelta, RMSprop, Adam
    from keras.utils import to_categorical

    import_keras = True
# `except Exception` (not a bare `except:`) so SystemExit/KeyboardInterrupt
# still propagate; keras/backend import failures are not always ImportError.
except Exception:
    import_keras = False
    log.info('could not import keras. Neural networks will not be used')
def keras_create_model(params, problem_type):
    """Build and compile a sequential neural net described by ``params``.

    params: dict with keys 'number_layers', 'units', 'input_dim',
        'activation', 'batch_normalization', 'dropout', 'output_dim'
        (plus the optimizer keys consumed by keras_compile_model).
    problem_type: 'classification' adds a final sigmoid activation.
    Returns the compiled keras model.
    """
    log.info('creating NN structure')
    model = Sequential()
    for layer_index in range(int(params['number_layers'])):
        dense_kwargs = {'units': params['units']}
        # Only the first layer declares the input dimension.
        if layer_index == 0:
            dense_kwargs['input_dim'] = params['input_dim']
        model.add(Dense(**dense_kwargs))
        model.add(Activation(params['activation']))
        if params['batch_normalization']:
            model.add(BatchNormalization())
        model.add(Dropout(params['dropout']))

    # Output layer; classification problems get a sigmoid on top.
    model.add(Dense(params['output_dim']))
    if problem_type == 'classification':
        model.add(Activation('sigmoid'))

    keras_compile_model(model, params, problem_type)
    return model
def keras_compile_model(model, params, problem_type):
    """Compile ``model`` in place (also useful to reset its weights).

    The optimizer is chosen from params['optimizer'] (RMSprop when the name
    is unknown) with params['learning_rate']; the loss follows the problem
    type and output dimension.
    """
    log.info('compiling NN model')
    # Dispatch table instead of an if/elif chain; RMSprop is the fallback.
    optimizer_classes = {
        'Adagrad': Adagrad,
        'Adadelta': Adadelta,
        'Adam': Adam,
    }
    optimizer_cls = optimizer_classes.get(params['optimizer'], RMSprop)
    optimizer = optimizer_cls(lr=params['learning_rate'])

    if problem_type == 'regression':
        loss = 'mse'
    elif params['output_dim'] == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'categorical_crossentropy'

    model.compile(loss=loss, optimizer=optimizer)
| {
"content_hash": "47b89d89816e50ed4bafbd94add877cc",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 32.904761904761905,
"alnum_prop": 0.6637723106608779,
"repo_name": "pierre-chaville/automlk",
"id": "f81e879774ab49f0871068206ee633d3be5c9c84",
"size": "2073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automlk/utils/keras_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "867"
},
{
"name": "CSS",
"bytes": "207943"
},
{
"name": "HTML",
"bytes": "108986"
},
{
"name": "Jupyter Notebook",
"bytes": "25275"
},
{
"name": "Python",
"bytes": "322808"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
} |
import vim
from orgmode._vim import echo, echom, echoe, ORGMODE, apply_count, repeat, insert_at_cursor, indent_orgmode
from orgmode import settings
from orgmode.menu import Submenu, Separator, ActionEntry, add_cmd_mapping_menu
from orgmode.keybinding import Keybinding, Plug, Command
from orgmode.liborgmode.checkboxes import Checkbox
from orgmode.liborgmode.dom_obj import OrderListType
from orgmode.py3compat.encode_compatibility import *
from orgmode.py3compat.py_py3_string import *
from orgmode.py3compat.unicode_compatibility import *
class EditCheckbox(object):
u"""
Checkbox plugin.
"""
    def __init__(self):
        u""" Initialize plugin """
        object.__init__(self)
        # menu entries this plugin should create
        self.menu = ORGMODE.orgmenu + Submenu(u'Edit Checkbox')

        # key bindings for this plugin
        # key bindings are also registered through the menu so only additional
        # bindings should be put in this variable
        self.keybindings = []

        # commands for this plugin (populated during registration)
        self.commands = []
    @classmethod
    def new_checkbox(cls, below=None, plain=None):
        '''
        Insert a new checkbox/plain-list item relative to the current one.

        if below is:
            True -> create new list below current line
            False/None -> create new list above current line
        if plain is:
            True -> create a plainlist item
            False/None -> create an empty checkbox
        '''
        d = ORGMODE.get_document()
        h = d.current_heading()
        if h is None:
            return
        # init checkboxes for current heading
        h.init_checkboxes()
        c = h.current_checkbox()

        nc = Checkbox()
        nc._heading = h

        # default checkbox level
        level = h.level + 1
        start = vim.current.window.cursor[0] - 1
        # if no checkbox is found, insert at current line with indent level=1
        if c is None:
            h.checkboxes.append(nc)
        else:
            l = c.get_parent_list()
            idx = c.get_index_in_parent_list()
            if l is not None and idx is not None:
                l.insert(idx + (1 if below else 0), nc)
                # workaround for broken associations, Issue #165
                nc._parent = c.parent
                # manually splice the new checkbox into the sibling chain
                if below:
                    if c.next_sibling:
                        c.next_sibling._previous_sibling = nc
                    nc._next_sibling = c.next_sibling
                    c._next_sibling = nc
                    nc._previous_sibling = c
                else:
                    if c.previous_sibling:
                        c.previous_sibling._next_sibling = nc
                    nc._next_sibling = c
                    nc._previous_sibling = c.previous_sibling
                    c._previous_sibling = nc

            t = c.type
            # increase key for ordered lists
            if t[-1] in OrderListType:
                try:
                    # numeric keys, e.g. "1." -> "2." below / "0." above
                    num = int(t[:-1]) + (1 if below else -1)
                    if num < 0:
                        # don't decrease to numbers below zero
                        echom(u"Can't decrement further than '0'")
                        return
                    t = '%d%s' % (num, t[-1])
                except ValueError:
                    # alphabetic keys, wrapping between a-z and A-Z
                    try:
                        char = ord(t[:-1]) + (1 if below else -1)
                        if below:
                            if char == 91:
                                # stop incrementing at Z (90)
                                echom(u"Can't increment further than 'Z'")
                                return
                            elif char == 123:
                                # increment from z (122) to A
                                char = 65
                        else:
                            if char == 96:
                                # stop decrementing at a (97)
                                echom(u"Can't decrement further than 'a'")
                                return
                            elif char == 64:
                                # decrement from A (65) to z
                                char = 122
                        t = u'%s%s' % (chr(char), t[-1])
                    except ValueError:
                        pass
            nc.type = t
            level = c.level

            if below:
                start = c.end_of_last_child
            else:
                start = c.start

        if plain:  # only create plainlist item when requested
            nc.status = None
        nc.level = level

        if below:
            start += 1
        # vim's buffer behave just opposite to Python's list when inserting a
        # new item. The new entry is appended in vim put prepended in Python!
        vim.current.buffer.append("")  # workaround for neovim
        vim.current.buffer[start:start] = [unicode(nc)]
        del vim.current.buffer[-1]  # restore from workaround for neovim

        # update checkboxes status
        cls.update_checkboxes_status()

        # do not start insert upon adding new checkbox, Issue #211
        if int(settings.get(u'org_prefer_insert_mode', u'1')):
            vim.command(u_encode(u'exe "normal %dgg"|startinsert!' % (start + 1, )))
        else:
            vim.command(u_encode(u'exe "normal %dgg$"' % (start + 1, )))
@classmethod
def toggle(cls, checkbox=None):
    u"""
    Toggle the checkbox given in the parameter.

    If the checkbox is not given, it will toggle the current checkbox.
    """
    d = ORGMODE.get_document()
    current_heading = d.current_heading()
    # init checkboxes for current heading
    if current_heading is None:
        return
    current_heading = current_heading.init_checkboxes()
    if checkbox is None:
        # get current_checkbox
        c = current_heading.current_checkbox()
        # no checkbox found under the cursor: just refresh all states
        if c is None:
            cls.update_checkboxes_status()
            return
    else:
        c = checkbox

    if c.status == Checkbox.STATUS_OFF or c.status is None:
        # set checkbox status on if all children are on (or it has none)
        if c.all_children_status()[0] == 0 or c.are_children_all(Checkbox.STATUS_ON):
            c.toggle()
            d.write_checkbox(c)
        elif c.status is None:
            # item had no status at all: give it an explicit "off" state
            c.status = Checkbox.STATUS_OFF
            d.write_checkbox(c)
    elif c.status == Checkbox.STATUS_ON:
        # switch off when it has no children or is_child_one() reports
        # at least one child in the off state
        if c.all_children_status()[0] == 0 or c.is_child_one(Checkbox.STATUS_OFF):
            c.toggle()
            d.write_checkbox(c)
    elif c.status == Checkbox.STATUS_INT:
        # can't toggle intermediate state directly according to emacs orgmode
        pass

    # update checkboxes status
    cls.update_checkboxes_status()
@classmethod
def _update_subtasks(cls):
    u"""
    Refresh the subtask summary of the heading under the cursor.

    Reads the current heading, updates its subtask counters from the
    status of its top-level checkboxes, then recurses into nested
    checkboxes via _update_checkboxes_subtasks().
    """
    d = ORGMODE.get_document()
    h = d.current_heading()
    # guard against being invoked while the cursor is outside any
    # heading, mirroring the check in update_checkboxes_status()
    if h is None:
        return
    # init checkboxes for current heading
    h.init_checkboxes()
    # update heading subtask info
    c = h.first_checkbox
    if c is None:
        return
    total, on = c.all_siblings_status()
    h.update_subtasks(total, on)
    # update all checkboxes under current heading
    cls._update_checkboxes_subtasks(c)
@classmethod
def _update_checkboxes_subtasks(cls, checkbox):
    u"""Recursively refresh subtask counters for *checkbox* and its siblings."""
    # Walk every sibling on this level; only items that actually have
    # children carry a subtask summary.
    for item in checkbox.all_siblings():
        if not item.children:
            continue
        amount, checked = item.first_child.all_siblings_status()
        item.update_subtasks(amount, checked)
        # descend one level to refresh nested counters as well
        cls._update_checkboxes_subtasks(item.first_child)
@classmethod
def update_checkboxes_status(cls):
    u"""Recompute all checkbox states and subtask counters of the current heading."""
    heading = ORGMODE.get_document().current_heading()
    # nothing to do when the cursor is outside any heading
    if heading is None:
        return
    heading.init_checkboxes()
    cls._update_checkboxes_status(heading.first_checkbox)
    cls._update_subtasks()
@classmethod
def _update_checkboxes_status(cls, checkbox=None):
    u""" helper function for update checkboxes status

    Walks one indent level of checkboxes, recursing into children, and
    writes back any status that changed.

    :checkbox: The first checkbox of this indent level
    :return: The status of the parent checkbox
    """
    if checkbox is None:
        return
    # counters for the statuses observed on this level
    status_off, status_on, status_int, total = 0, 0, 0, 0
    # update all top level checkboxes' status
    for c in checkbox.all_siblings():
        current_status = c.status
        # if this checkbox is not a leaf, its status should be determined
        # by all of its children
        if c.all_children_status()[0] > 0:
            current_status = cls._update_checkboxes_status(c.first_child)
        # don't update status if the checkbox has no status
        if c.status is None:
            current_status = None
        # the checkbox needs to have status to be counted
        else:
            total += 1
        # count number of each status on this checkbox level
        if current_status == Checkbox.STATUS_OFF:
            status_off += 1
        elif current_status == Checkbox.STATUS_ON:
            status_on += 1
        elif current_status == Checkbox.STATUS_INT:
            status_int += 1
        # write status back to the buffer if it changed
        if current_status is not None and c.status != current_status:
            c.status = current_status
            d = ORGMODE.get_document()
            d.write_checkbox(c)

    # derive the status the parent should show for this level
    parent_status = Checkbox.STATUS_INT
    # no statusful checkbox on this level: keep the intermediate default
    if total == 0:
        pass
    # all sibling checkboxes are off status
    elif status_off == total:
        parent_status = Checkbox.STATUS_OFF
    # all sibling checkboxes are on status
    elif status_on == total:
        parent_status = Checkbox.STATUS_ON
    # at least one sibling checkbox is on or intermediate
    elif status_on != 0 or status_int != 0:
        parent_status = Checkbox.STATUS_INT
    # other cases
    else:
        parent_status = None

    return parent_status
def register(self):
    u"""
    Registration of the plugin.

    Declares all commands, key mappings and menu entries for checkbox
    and plain-list editing by delegating to add_cmd_mapping_menu().
    """
    # (command name, vim-side call, key mapping, menu label) for every
    # entry; a table keeps the six declarations uniform.
    entries = (
        # checkbox related operations
        (u'OrgCheckBoxNewAbove',
         u'%s ORGMODE.plugins[u"EditCheckbox"].new_checkbox()<CR>' % VIM_PY_CALL,
         u'<localleader>cN',
         u'New CheckBox Above'),
        (u'OrgCheckBoxNewBelow',
         u'%s ORGMODE.plugins[u"EditCheckbox"].new_checkbox(below=True)<CR>' % VIM_PY_CALL,
         u'<localleader>cn',
         u'New CheckBox Below'),
        (u'OrgCheckBoxToggle',
         u':silent! %s ORGMODE.plugins[u"EditCheckbox"].toggle()<CR>' % VIM_PY_CALL,
         u'<localleader>cc',
         u'Toggle Checkbox'),
        (u'OrgCheckBoxUpdate',
         u':silent! %s ORGMODE.plugins[u"EditCheckbox"].update_checkboxes_status()<CR>' % VIM_PY_CALL,
         u'<localleader>c#',
         u'Update Subtasks'),
        # plainlist related operations
        (u'OrgPlainListItemNewAbove',
         u'%s ORGMODE.plugins[u"EditCheckbox"].new_checkbox(plain=True)<CR>' % VIM_PY_CALL,
         u'<localleader>cL',
         u'New PlainList Item Above'),
        (u'OrgPlainListItemNewBelow',
         u'%s ORGMODE.plugins[u"EditCheckbox"].new_checkbox(below=True, plain=True)<CR>' % VIM_PY_CALL,
         u'<localleader>cl',
         u'New PlainList Item Below'),
    )
    for cmd_name, cmd_function, cmd_key, cmd_menu in entries:
        add_cmd_mapping_menu(
            self,
            name=cmd_name,
            function=cmd_function,
            key_mapping=cmd_key,
            menu_desrc=cmd_menu
        )
# vim: set noexpandtab:
| {
"content_hash": "e3f1757d2fd47242ea57bd6a3577496f",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 107,
"avg_line_length": 29.3140243902439,
"alnum_prop": 0.6764430577223088,
"repo_name": "lucianp/dotfiles",
"id": "bb93fc1757fd04c3e6dc506b222032b68e45e4c4",
"size": "9640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "link/.vim/ftplugin/orgmode/plugins/EditCheckbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12728"
},
{
"name": "Erlang",
"bytes": "1972"
},
{
"name": "HTML",
"bytes": "16488"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "208"
},
{
"name": "Python",
"bytes": "232360"
},
{
"name": "Ruby",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "8489"
},
{
"name": "Vim script",
"bytes": "1616576"
}
],
"symlink_target": ""
} |
import csv
import numpy as np
import xlrd
from openerp.tools.translate import _
from openerp import netsvc
from openerp.osv import osv, fields
from lxml import etree
from tempfile import TemporaryFile
import base64
import os
import time
from datetime import date, datetime
class is_contract_automobile(osv.osv):
    # One "contract not found" line shown on an import record: the
    # customer order/article references that could not be matched.
    _name = "is.contract.automobile.line"
    _columns = {
        # customer order reference
        'ref_partner': fields.char('Reference commande client', size=64),
        # customer article reference
        'ref_product': fields.char('Reference article client', size=64),
        # import record this line belongs to
        'import_id': fields.many2one('is.import.contract', 'Import'),
    }
is_contract_automobile()
class is_import_contract(osv.osv):
    # Import wizard model: reads a customer EDI file (XML or CSV),
    # matches its lines against contract.automobile records and creates
    # the corresponding sale.order quotations.
    _name = "is.import.contract"
    _description = "Importer les commandes ouvertes"
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Client', required=True),
        'import_function': fields.char("Fonction d'importation EDI", size=32, required=True),
        'name': fields.char('Nom de fichier', size=128),
        'file': fields.binary('Fichier', required=True),
        'create_uid': fields.many2one('res.users', 'Importe par', readonly=True),
        'create_date': fields.datetime("Date d'importation"),
        'notfound_contract': fields.boolean('Contrats non trouves'),
        'contract_ids': fields.one2many('is.contract.automobile.line', 'import_id', "Contrats non trouves dans le fichier d'import"),
    }
    _defaults = {
        'notfound_contract': False,
    }

    # Fill the import_function field from the chosen partner_id.
    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        if not part:
            return {}
        part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        val = {
            'import_function': part.import_function,
        }
        return {'value': val}

    # Attach an HTML comment message to the given import record.
    def _create_message_log(self, cr, uid, id_import, msg_error, context=None):
        msg_obj = self.pool.get('mail.message')
        mail_values = {
            'type': 'comment',
            'model': 'casa_porta.import',
            'body': '<p>' + msg_error + '<p>',
            'res_id': id_import
        }
        msg_obj.create(cr, uid, mail_values)

    # Return (ref_product, ref_partner) pairs from the data extracted
    # out of the import file.
    def get_contracts(self, cr, uid, data, context=None):
        res = []
        if data:
            for item in data:
                res.append((item['ref_product'], item['ref_partner']))
        return res

    # Return the (ref_product, ref_partner) pairs of all
    # contract.automobile records linked to the chosen partner.
    def get_contracts_partner(self, cr, uid, partner_id, context=None):
        res = []
        contract_obj = self.pool.get('contract.automobile')
        contract_ids = contract_obj.search(cr, uid, [('partner_id', '=', partner_id)], context=context)
        if contract_ids:
            for contract in contract_obj.read(cr, uid, contract_ids, ['ref_product', 'ref_partner']):
                res.append((contract['ref_product'], contract['ref_partner']))
        return res

    # Compare the contract list from the import file with the one
    # associated to the partner; return the contracts not found.
    def compare_lst_contracts(self, cr, uid, lst_contracts_partner, lst_contracts, context=None):
        res = []
        for contract in lst_contracts:
            if not contract in lst_contracts_partner:
                res.append(contract)
            else:
                continue
        return res

    # Create is.contract.automobile.line records for the contracts that
    # were not found, so they can be displayed to the user.
    def contracts_notfound(self, cr, uid, id, lst_contracts_notfound, context=None):
        res = []
        print "lst_contracts_notfound*****", lst_contracts_notfound
        for contract in lst_contracts_notfound:
            # NOTE(review): get_contracts() builds tuples in the order
            # (ref_product, ref_partner), but contract[0] is stored here
            # as ref_partner and contract[1] as ref_product -- confirm
            # the intended field order.
            newid = self.pool.get('is.contract.automobile.line').create(cr, uid, {'ref_partner': contract[0],
                                                                                  'ref_product': contract[1],
                                                                                  'import_id': id},
                                                                        context=context)
            res.append(newid)
        return res

    # Return the id of the contract matching the
    # (ref_partner, ref_product) pair.
    def get_contract_id(self, cr, uid, ref_partner, ref_product, context=None):
        contract_obj = self.pool.get('contract.automobile')
        # [0] assumes at least one match exists; raises IndexError otherwise
        contract_id = contract_obj.search(cr, uid, [('ref_partner', '=', ref_partner), ('ref_product', '=', ref_product)], context=context)[0]
        return contract_id

    # Delete the draft quotations of the given contract whose delivery
    # date is today or later.
    def delete_quotations(self, cr, uid, contract_id, context=None):
        order_obj = self.pool.get('sale.order')
        today = time.strftime('%Y-%m-%d')
        order_ids = order_obj.search(cr, uid, [('contract_id', '=', contract_id), ('state', '=', 'draft'), ('date_livraison', '>=', today)], context=context)
        res = order_obj.unlink(cr, uid, order_ids, context=context)
        return res

    # Parse the delivery date found in the import file.
    # Returns an ISO date string, or None for an unknown import_function.
    def convert_date(self, cr, uid, import_function, date_livraison, context=None):
        if import_function == 'xml1':
            date = time.strptime(date_livraison, '%Y%m%d%H%M%S')
            return time.strftime('%Y-%m-%d', date)
        elif import_function == 'csv1':
            date = time.strptime(date_livraison, '%d.%m.%Y')
            return time.strftime('%Y-%m-%d', date)

    # Interpret the contract type code found in the import file
    # ('ferme' = firm, 'previsionnel' = forecast).
    def convert_contract_type(self, cr, uid, import_function, contract_type, context=None):
        if import_function == 'xml1':
            if contract_type == '1':
                return 'ferme'
            elif contract_type == '4':
                return 'previsionnel'
            else:
                return ''
        if import_function == 'csv1':
            if contract_type == 'F':
                return 'ferme'
            elif contract_type == 'P':
                return 'previsionnel'
            else:
                return ''

    # Create one quotation (sale.order) for an import file detail line;
    # returns the new order id.
    def create_quotation(self, cr, uid, ids, import_function, contract_id, partner_id, detail, context=None):
        order_obj = self.pool.get('sale.order')
        order_line_obj = self.pool.get('sale.order.line')
        contract_obj = self.pool.get('contract.automobile')
        contract = contract_obj.browse(cr, uid, contract_id, context=context)
        # reuse the standard on-change handlers to prefill line and order values
        quotation_line = order_line_obj.product_id_change(cr, uid, ids, 1, contract.product_id.id, 0, False, 0, False, '', partner_id, False, True, False, False, False, False, context=context)['value']
        quotation_line.update({'product_id': contract.product_id.id, 'product_uom_qty': detail['qty_product']})
        quotation = order_obj.onchange_partner_id(cr, uid, ids, partner_id, context=context)['value']
        date_livraison = self.convert_date(cr, uid, import_function, detail['date_livraison'], context=context)
        date_expedition = order_obj.onchange_date_livraison(cr, uid, ids, date_livraison, partner_id, context=context)['value']['date_expedition']
        type = self.convert_contract_type(cr, uid, import_function, detail['type_contract'], context=context)
        if contract.ref_partner:
            origin = contract.ref_partner + ', ' + contract.ref_product
        else:
            origin = contract.ref_product
        quotation_values = {
            'name': '/',
            'partner_id': partner_id,
            'client_order_ref': contract.ref_partner,
            'contract_id': contract.id,
            'type_contrat': type,
            'date_livraison': date_livraison,
            'date_expedition': date_expedition,
            'origin': origin,
            'order_line': [[0, False, quotation_line]],
            'picking_policy': 'direct',
            'order_policy': 'manual',
            'invoice_quantity': 'order',
        }
        quotation.update(quotation_values)
        newid = order_obj.create(cr, uid, quotation, context=context)
        return newid

    # Main entry point: parse the uploaded file, report unmatched
    # contracts, otherwise replace future draft quotations and return an
    # action showing the created ones.
    def import_contract_orders(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        xml1_obj = self.pool.get('is_import_xml1')
        csv1_obj = self.pool.get('is_import_csv1')
        result = []
        data = self.read(cr, uid, ids)[0]
        if data:
            # extract the information from the import file, to be used
            # when creating the open orders
            if data['import_function'] == 'xml1':
                res = xml1_obj.get_data_xml(cr, uid, data, context=context)
            elif data['import_function'] == 'csv1':
                res = csv1_obj.get_data_csv(cr, uid, data, context=context)
            else:
                res = []
            print "res********", res
            lst_contracts = self.get_contracts(cr, uid, res, context=context)
            print "lst_contracts********", lst_contracts
            lst_contracts_partner = self.get_contracts_partner(cr, uid, data['partner_id'][0], context=context)
            print "lst_contracts_partner********", lst_contracts_partner
            lst_contracts_notfound = self.compare_lst_contracts(cr, uid, lst_contracts_partner, lst_contracts, context=context)
            if lst_contracts_notfound:
                # some file lines reference unknown contracts: flag the
                # import record and expose the offending lines instead
                # of creating any quotation
                print "lst_contracts_notfound********", lst_contracts_notfound
                self.write(cr, uid, ids[0], {'notfound_contract': True}, context=context)
                line_ids = self.contracts_notfound(cr, uid, ids[0], lst_contracts_notfound, context=context)
                return line_ids
            else:
                for item in res:
                    # find the contract id for the
                    # (ref_partner, ref_product) pair
                    contract_id = self.get_contract_id(cr, uid, item['ref_partner'], item['ref_product'], context=context)
                    # delete draft quotations with a delivery date later
                    # than or equal to today
                    self.delete_quotations(cr, uid, contract_id, context=context)
                    for detail in item['details']:
                        newid = self.create_quotation(cr, uid, ids, data['import_function'], contract_id, data['partner_id'][0], detail, context=context)
                        result.append(newid)
        result.sort()

        # open the standard quotation list filtered to the created orders
        action_model = False
        data_pool = self.pool.get('ir.model.data')
        action = {}
        action_model, action_id = data_pool.get_object_reference(cr, uid, 'sale', "action_quotations")
        if action_model:
            action_pool = self.pool.get(action_model)
            action = action_pool.read(cr, uid, action_id, context=context)
            action['domain'] = "[('id','in', [" + ','.join(map(str, result)) + "])]"
        return action

    # (a superseded, fully commented-out import_csv() variant that
    # duplicated import_contract_orders() was removed here)
is_import_contract()
| {
"content_hash": "91a0d5804f39f7290edabfed7c57c2ad",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 201,
"avg_line_length": 48.63333333333333,
"alnum_prop": 0.5565455791638109,
"repo_name": "tonygalmiche/is_contract_edi",
"id": "abbed9dda28a49032426a8cb99bb98fb83b91848",
"size": "14635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "is_import.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32863"
}
],
"symlink_target": ""
} |
"""
Objects representing MediaWiki sites (wikis).
This module also includes functions to load families, which are
groups of wikis on the same topic in different languages.
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import copy
import datetime
import functools
import heapq
import itertools
import json
import mimetypes
import os
import re
import sys
import threading
import time
from collections import Iterable, Container, namedtuple, Mapping
from warnings import warn
import pywikibot
import pywikibot.family
from pywikibot.comms.http import get_authentication
from pywikibot.data import api
from pywikibot.echo import Notification
from pywikibot.exceptions import (
Error,
PageRelatedError,
EditConflict,
PageCreatedConflict,
PageDeletedConflict,
ArticleExistsConflict,
IsNotRedirectPage,
CircularRedirect,
InterwikiRedirectPage,
InconsistentTitleReceived,
LockedPage,
CascadeLockedPage,
LockedNoPage,
NoPage,
SiteDefinitionError,
UnknownSite,
UnknownExtension,
FamilyMaintenanceWarning,
NoUsername,
SpamfilterError,
NoCreateError,
UserBlocked,
EntityTypeUnknownException,
FatalServerError,
PageSaveRelatedError,
)
from pywikibot.family import WikimediaFamily
from pywikibot.throttle import Throttle
from pywikibot.tools import (
compute_file_hash,
itergroup, UnicodeMixin, ComparableMixin, SelfCallMixin, SelfCallString,
deprecated, deprecate_arg, deprecated_args, remove_last_args,
redirect_func, issue_deprecation_warning,
manage_wrapping, MediaWikiVersion, first_upper, normalize_username,
merge_unique_dicts,
PY2,
filter_unique,
)
from pywikibot.tools.ip import is_IP
if sys.version_info[0] > 2:
from itertools import zip_longest
from urllib.parse import urlencode, urlparse
basestring = (str,)
unicode = str
else:
from itertools import izip_longest as zip_longest
from urllib import urlencode
from urlparse import urlparse
_logger = "wiki.site"
# Raised by page-locking helpers; inherits the package-wide error base.
class PageInUse(pywikibot.Error):

    """Page cannot be reserved for writing due to existing lock."""
class LoginStatus(object):

    """
    Enum for Login statuses.

    >>> LoginStatus.NOT_ATTEMPTED
    -3
    >>> LoginStatus.AS_USER
    0
    >>> LoginStatus.name(-3)
    'NOT_ATTEMPTED'
    >>> LoginStatus.name(0)
    'AS_USER'
    """

    # Enumerated state values; negative values are "not logged in" states.
    NOT_ATTEMPTED = -3
    IN_PROGRESS = -2
    NOT_LOGGED_IN = -1
    AS_USER = 0
    AS_SYSOP = 1

    @classmethod
    def name(cls, search_value):
        """Return the name of a LoginStatus by its value."""
        # scan the class dict for an all-caps attribute with this value
        for key, value in cls.__dict__.items():
            if key == key.upper() and value == search_value:
                return key
        raise KeyError("Value %r could not be found in this enum"
                       % search_value)

    def __init__(self, state):
        """Constructor."""
        self.state = state

    def __repr__(self):
        """Return internal representation."""
        return 'LoginStatus(%s)' % (LoginStatus.name(self.state))
# Backwards-compatible alias: calling pywikibot.site.Family() is
# redirected to pywikibot.family.Family.load through redirect_func.
Family = redirect_func(pywikibot.family.Family.load,
                       target_module='pywikibot.family.Family',
                       old_name='Family')
class Namespace(Iterable, ComparableMixin, UnicodeMixin):

    """
    Namespace site data object.

    This is backwards compatible with the structure of entries
    in site._namespaces which were a list of::

        [customised namespace,
         canonical namespace name?,
         namespace alias*]

    If the canonical_name is not provided for a namespace between -2
    and 15, the MediaWiki 1.14+ built-in names are used.
    Enable use_image_name to use built-in names from MediaWiki 1.13
    and earlier as the details.

    Image and File are aliases of each other by default.

    If only one of canonical_name and custom_name are available, both
    properties will have the same value.
    """

    # Well-known namespace ids as symbolic constants.
    MEDIA = -2
    SPECIAL = -1
    MAIN = 0
    TALK = 1
    USER = 2
    USER_TALK = 3
    PROJECT = 4
    PROJECT_TALK = 5
    FILE = 6
    FILE_TALK = 7
    MEDIAWIKI = 8
    MEDIAWIKI_TALK = 9
    TEMPLATE = 10
    TEMPLATE_TALK = 11
    HELP = 12
    HELP_TALK = 13
    CATEGORY = 14
    CATEGORY_TALK = 15

    # These are the MediaWiki built-in names for MW 1.14+.
    # Namespace prefixes are always case-insensitive, but the
    # canonical forms are capitalized.
    canonical_namespaces = {
        -2: u"Media",
        -1: u"Special",
        0: u"",
        1: u"Talk",
        2: u"User",
        3: u"User talk",
        4: u"Project",
        5: u"Project talk",
        6: u"File",
        7: u"File talk",
        8: u"MediaWiki",
        9: u"MediaWiki talk",
        10: u"Template",
        11: u"Template talk",
        12: u"Help",
        13: u"Help talk",
        14: u"Category",
        15: u"Category talk",
    }

    def __init__(self, id, canonical_name=None, custom_name=None,
                 aliases=None, use_image_name=False, **kwargs):
        """Constructor.

        @param id: namespace id
        @type id: int
        @param canonical_name: Canonical name
        @type canonical_name: str
        @param custom_name: Name defined in server LocalSettings.php
        @type custom_name: unicode
        @param aliases: Aliases
        @type aliases: list of unicode
        @param use_image_name: Use 'Image' as default canonical
                               for 'File' namespace
        @param use_image_name: bool
        """
        self.id = id
        if aliases is None:
            self.aliases = []
        else:
            self.aliases = aliases
        # fall back to the MediaWiki built-in name for known ids
        if not canonical_name and id in self.canonical_namespaces:
            if use_image_name:
                # pre-1.14 names for the File namespaces
                if id == 6:
                    canonical_name = u'Image'
                elif id == 7:
                    canonical_name = u"Image talk"
            if not canonical_name:
                canonical_name = self.canonical_namespaces[id]

        assert custom_name is not None or canonical_name is not None, \
            'Namespace needs to have at least one name'

        # mirror whichever name is present into the missing one
        self.custom_name = custom_name \
            if custom_name is not None else canonical_name
        self.canonical_name = canonical_name \
            if canonical_name is not None else custom_name

        if not aliases:
            # File <-> Image are aliases of each other by default
            if id in (6, 7):
                if use_image_name:
                    alias = u'File'
                else:
                    alias = u'Image'
                if id == 7:
                    alias += u' talk'
                self.aliases = [alias]
            else:
                self.aliases = []
        else:
            self.aliases = aliases

        # store any extra site-provided attributes (e.g. case) verbatim
        for key, value in kwargs.items():
            setattr(self, key, value)

    def _distinct(self):
        # All names this namespace answers to, without listing the
        # canonical name twice when it equals the custom name.
        if self.custom_name == self.canonical_name:
            return [self.canonical_name] + self.aliases
        else:
            return [self.custom_name, self.canonical_name] + self.aliases

    def _contains_lowercase_name(self, name):
        """Determine a lowercase normalised name is a name of this namespace.

        @rtype: bool
        """
        return name in [x.lower() for x in self._distinct()]

    def __contains__(self, item):
        """Determine if item is a name of this namespace.

        The comparison is case insensitive, and item may have a single
        colon on one or both sides of the name.

        @param item: name to check
        @type item: basestring
        @rtype: bool
        """
        # the main namespace's name is the empty string
        if item == '' and self.id == 0:
            return True

        name = Namespace.normalize_name(item)
        if not name:
            return False

        return self._contains_lowercase_name(name.lower())

    def __len__(self):
        """Obtain length of the iterable."""
        if self.custom_name == self.canonical_name:
            return len(self.aliases) + 1
        else:
            return len(self.aliases) + 2

    def __iter__(self):
        """Return an iterator."""
        return iter(self._distinct())

    def __getitem__(self, index):
        """Obtain an item from the iterable."""
        # order: custom name (if distinct), canonical name, aliases
        if self.custom_name != self.canonical_name:
            if index == 0:
                return self.custom_name
            else:
                index -= 1

        if index == 0:
            return self.canonical_name
        else:
            return self.aliases[index - 1]

    @staticmethod
    def _colons(id, name):
        """Return the name with required colons, depending on the ID."""
        if id == 0:
            # main namespace: just the separator
            return u':'
        elif id in (6, 14):
            # File and Category need a leading colon to link, not embed
            return u':' + name + u':'
        else:
            return u'' + name + u':'

    def __str__(self):
        """Return the canonical string representation."""
        return self.canonical_prefix()

    def __unicode__(self):
        """Return the custom string representation."""
        return self.custom_prefix()

    def canonical_prefix(self):
        """Return the canonical name with required colons."""
        return Namespace._colons(self.id, self.canonical_name)

    def custom_prefix(self):
        """Return the custom name with required colons."""
        return Namespace._colons(self.id, self.custom_name)

    def __int__(self):
        """Return the namespace id."""
        return self.id

    def __index__(self):
        """Return the namespace id."""
        return self.id

    def __hash__(self):
        """Return the namespace id."""
        return self.id

    def __eq__(self, other):
        """Compare whether two namespace objects are equal."""
        if isinstance(other, int):
            return self.id == other
        elif isinstance(other, Namespace):
            return self.id == other.id
        elif isinstance(other, basestring):
            return other in self
        # NOTE(review): other types implicitly return None rather than
        # NotImplemented -- confirm this fall-through is intended.

    def __ne__(self, other):
        """Compare whether two namespace objects are not equal."""
        return not self.__eq__(other)

    def __mod__(self, other):
        """Apply modulo on the namespace id."""
        return self.id.__mod__(other)

    def __sub__(self, other):
        """Apply subtraction on the namespace id."""
        return -(other) + self.id

    def __add__(self, other):
        """Apply addition on the namespace id."""
        return other + self.id

    def _cmpkey(self):
        """Return the ID as a comparison key."""
        return self.id

    def __repr__(self):
        """Return a reconstructable representation."""
        standard_attr = ['id', 'custom_name', 'canonical_name', 'aliases']
        # include any extra attributes stored by __init__'s **kwargs
        extra = [(key, self.__dict__[key])
                 for key in sorted(self.__dict__)
                 if key not in standard_attr]
        if extra:
            kwargs = ', ' + ', '.join([key + '=' + repr(value)
                                       for (key, value) in
                                       extra])
        else:
            kwargs = ''

        return '%s(id=%d, custom_name=%r, canonical_name=%r, aliases=%r%s)' \
               % (self.__class__.__name__, self.id, self.custom_name,
                  self.canonical_name, self.aliases, kwargs)

    @staticmethod
    def default_case(id, default_case=None):
        """Return the default fixed case value for the namespace ID."""
        # https://www.mediawiki.org/wiki/Manual:$wgCapitalLinkOverrides#Warning
        if id > 0 and id % 2 == 1:  # the talk ns has the non-talk ns case
            id -= 1
        if id in (-1, 2, 8):
            # Special, User and MediaWiki are always first-letter
            return 'first-letter'
        else:
            return default_case

    @classmethod
    def builtin_namespaces(cls, use_image_name=False, case='first-letter'):
        """Return a dict of the builtin namespaces."""
        return dict((i, cls(i, use_image_name=use_image_name,
                            case=cls.default_case(i, case)))
                    for i in range(-2, 16))

    @staticmethod
    def normalize_name(name):
        """
        Remove an optional colon before and after name.

        Returns False when the name is malformed (more than one leading
        or any embedded colon).

        TODO: reject illegal characters.
        """
        if name == '':
            return ''

        parts = name.split(':', 4)
        count = len(parts)
        if count > 3:
            return False
        elif count == 3:
            # allow a single trailing colon only
            if parts[2] != '':
                return False

        # Discard leading colon
        if count >= 2 and parts[0] == '' and parts[1]:
            return parts[1].strip()
        elif parts[0]:
            return parts[0].strip()
        return False

    @classmethod
    @deprecated('NamespacesDict.lookup_name')
    def lookup_name(cls, name, namespaces=None):
        """
        Find the Namespace for a name.

        @param name: Name of the namespace.
        @type name: basestring
        @param namespaces: namespaces to search
                           default: builtins only
        @type namespaces: dict of Namespace
        @rtype: Namespace or None
        """
        if not namespaces:
            namespaces = cls.builtin_namespaces()

        return NamespacesDict._lookup_name(name, namespaces)

    @staticmethod
    @deprecated('NamespacesDict.resolve')
    def resolve(identifiers, namespaces=None):
        """
        Resolve namespace identifiers to obtain Namespace objects.

        Identifiers may be any value for which int() produces a valid
        namespace id, except bool, or any string which Namespace.lookup_name
        successfully finds. A numerical string is resolved as an integer.

        @param identifiers: namespace identifiers
        @type identifiers: iterable of basestring or Namespace key,
            or a single instance of those types
        @param namespaces: namespaces to search (default: builtins only)
        @type namespaces: dict of Namespace
        @return: list of Namespace objects in the same order as the
            identifiers
        @rtype: list
        @raises KeyError: a namespace identifier was not resolved
        @raises TypeError: a namespace identifier has an inappropriate
            type such as NoneType or bool
        """
        if not namespaces:
            namespaces = Namespace.builtin_namespaces()

        return NamespacesDict._resolve(identifiers, namespaces)
class NamespacesDict(Mapping, SelfCallMixin):
"""
An immutable dictionary containing the Namespace instances.
It adds a deprecation message when called as the 'namespaces' property of
APISite was callable.
"""
_own_desc = 'the namespaces property'
def __init__(self, namespaces):
    """Create new dict using the given namespaces."""
    super(NamespacesDict, self).__init__()
    self._namespaces = namespaces
    # Namespace is iterable over every name it answers to (custom,
    # canonical and aliases); index them all by their lower-cased form.
    self._namespace_names = {
        name.lower(): ns
        for ns in namespaces.values()
        for name in ns
    }
def __iter__(self):
    """Iterate over all namespaces (by key, like a plain dict)."""
    return iter(self._namespaces)
def __getitem__(self, key):
    """
    Get the namespace with the given key.

    @param key: namespace key
    @type key: Namespace, int or str
    @rtype: Namespace
    """
    # Namespace instances and ints index the underlying dict directly;
    # strings go through the name/alias lookup first.
    if isinstance(key, (Namespace, int)):
        return self._namespaces[key]

    match = self.lookup_name(key)
    if match:
        return match
    # delegate the failure case to the Mapping base class
    return super(NamespacesDict, self).__getitem__(key)
def __getattr__(self, attr):
    """
    Get the namespace with the given name via attribute access.

    Only upper-case attribute names are treated as namespace lookups;
    anything else falls back to normal attribute resolution.

    @param attr: upper-case namespace name (e.g. 'FILE', 'USER_TALK')
    @type attr: str
    @rtype: Namespace
    """
    # lookup_name access _namespaces
    if attr.isupper():
        if attr == 'MAIN':
            # the main namespace's name is '', so map 'MAIN' to id 0
            return self[0]

        namespace = self.lookup_name(attr)
        if namespace:
            return namespace

    return self.__getattribute__(attr)
def __len__(self):
    """Get the number of Namespace objects in this mapping."""
    return len(self._namespaces)
def lookup_name(self, name):
    """
    Find the Namespace for a name also checking aliases.

    @param name: Name of the namespace.
    @type name: basestring
    @rtype: Namespace or None
    """
    normalized = Namespace.normalize_name(name)
    # normalize_name() reports malformed names as False
    if normalized is not False:
        return self.lookup_normalized_name(normalized.lower())
    return None
def lookup_normalized_name(self, name):
    """
    Find the Namespace for a name also checking aliases.

    The name has to be normalized and must be lower case.

    @param name: Name of the namespace.
    @type name: basestring
    @rtype: Namespace or None
    """
    # single dict lookup against the precomputed lower-cased name index
    return self._namespace_names.get(name)
# Temporary until Namespace.lookup_name can be removed
@staticmethod
def _lookup_name(name, namespaces):
    """Return the namespace in *namespaces* matching *name*, or None."""
    normalized = Namespace.normalize_name(name)
    if normalized is False:
        return None

    lowered = normalized.lower()
    for candidate in namespaces.values():
        if candidate._contains_lowercase_name(lowered):
            return candidate
    return None
def resolve(self, identifiers):
"""
Resolve namespace identifiers to obtain Namespace objects.
Identifiers may be any value for which int() produces a valid
namespace id, except bool, or any string which Namespace.lookup_name
successfully finds. A numerical string is resolved as an integer.
@param identifiers: namespace identifiers
@type identifiers: iterable of basestring or Namespace key,
or a single instance of those types
@return: list of Namespace objects in the same order as the
identifiers
@rtype: list
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
return self._resolve(identifiers, self._namespaces)
# Temporary until Namespace.resolve can be removed
@staticmethod
def _resolve(identifiers, namespaces):
if isinstance(identifiers, (basestring, Namespace)):
identifiers = [identifiers]
else:
# convert non-iterators to single item list
try:
iter(identifiers)
except TypeError:
identifiers = [identifiers]
# lookup namespace names, and assume anything else is a key.
# int(None) raises TypeError; however, bool needs special handling.
result = [NotImplemented if isinstance(ns, bool) else
NamespacesDict._lookup_name(ns, namespaces)
if isinstance(ns, basestring) and
not ns.lstrip('-').isdigit() else
namespaces[int(ns)] if int(ns) in namespaces
else None
for ns in identifiers]
if NotImplemented in result:
raise TypeError('identifiers contains inappropriate types: %r'
% identifiers)
# Namespace.lookup_name returns None if the name is not recognised
if None in result:
raise KeyError(u'Namespace identifier(s) not recognised: %s'
% u','.join([str(identifier) for identifier, ns
in zip(identifiers, result)
if ns is None]))
return result
class _IWEntry(object):
"""An entry of the _InterwikiMap with a lazy loading site."""
def __init__(self, local, url):
self._site = None
self.local = local
self.url = url
@property
def site(self):
if self._site is None:
try:
self._site = pywikibot.Site(url=self.url)
except Exception as e:
self._site = e
return self._site
class _InterwikiMap(object):
"""A representation of the interwiki map of a site."""
def __init__(self, site):
"""Create an empty uninitalized interwiki map for the given site."""
super(_InterwikiMap, self).__init__()
self._site = site
self._map = None
def reset(self):
"""Remove all mappings to force building a new mapping."""
self._map = None
@property
def _iw_sites(self):
"""Fill the interwikimap cache with the basic entries."""
# _iw_sites is a local cache to return a APISite instance depending
# on the interwiki prefix of that site
if self._map is None:
self._map = dict((iw['prefix'], _IWEntry('local' in iw, iw['url']))
for iw in self._site.siteinfo['interwikimap'])
return self._map
def __getitem__(self, prefix):
"""Return the site, locality and url for the requested prefix."""
if prefix not in self._iw_sites:
raise KeyError(u"'{0}' is not an interwiki prefix.".format(prefix))
if isinstance(self._iw_sites[prefix].site, BaseSite):
return self._iw_sites[prefix]
elif isinstance(self._iw_sites[prefix].site, Exception):
raise self._iw_sites[prefix].site
else:
raise TypeError('_iw_sites[%s] is wrong type: %s'
% (prefix, type(self._iw_sites[prefix].site)))
def get_by_url(self, url):
"""Return a set of prefixes applying to the URL."""
return set(prefix for prefix, iw_entry in self._iw_sites
if iw_entry.url == url)
class BaseSite(ComparableMixin):

    """Site methods that are independent of the communication interface."""

    def __init__(self, code, fam=None, user=None, sysop=None):
        """
        Constructor.

        @param code: the site's language code
        @type code: str
        @param fam: wiki family name (optional)
        @type fam: str or Family
        @param user: bot user name (optional)
        @type user: str
        @param sysop: sysop account user name (optional)
        @type sysop: str
        """
        if code.lower() != code:
            # Note the Site function in __init__ also emits a UserWarning
            # for this condition, showing the callers file and line no.
            pywikibot.log(u'BaseSite: code "%s" converted to lowercase' % code)
            code = code.lower()
        if not all(x in pywikibot.family.CODE_CHARACTERS for x in str(code)):
            pywikibot.log(u'BaseSite: code "%s" contains invalid characters'
                          % code)
        self.__code = code
        if isinstance(fam, basestring) or fam is None:
            self.__family = pywikibot.family.Family.load(fam)
        else:
            self.__family = fam

        self.obsolete = False
        # if we got an outdated language code, use the new one instead.
        if self.__code in self.__family.obsolete:
            if self.__family.obsolete[self.__code] is not None:
                self.__code = self.__family.obsolete[self.__code]
                # Note the Site function in __init__ emits a UserWarning
                # for this condition, showing the callers file and line no.
                pywikibot.log(u'Site %s instantiated using code %s'
                              % (self, code))
            else:
                # no such language anymore
                self.obsolete = True
                pywikibot.log(u'Site %s instantiated and marked "obsolete" '
                              u'to prevent access' % self)
        elif self.__code not in self.languages():
            if self.__family.name in list(self.__family.langs.keys()) and \
               len(self.__family.langs) == 1:
                self.__code = self.__family.name
                if self.__family == pywikibot.config.family \
                        and code == pywikibot.config.mylang:
                    pywikibot.config.mylang = self.__code
                    warn(u'Global configuration variable "mylang" changed to '
                         u'"%s" while instantiating site %s'
                         % (self.__code, self), UserWarning)
            else:
                raise UnknownSite(u"Language '%s' does not exist in family %s"
                                  % (self.__code, self.__family.name))

        # index 0 is the normal user, index 1 the sysop account
        self._username = [normalize_username(user), normalize_username(sysop)]

        self.use_hard_category_redirects = (
            self.code in self.family.use_hard_category_redirects)

        # following are for use with lock_page and unlock_page methods
        self._pagemutex = threading.Lock()
        self._locked_pages = []

    @deprecated
    def has_api(self):
        """Return whether this site has an API."""
        return False

    @property
    @deprecated(
        "APISite.siteinfo['case'] or Namespace.case == 'case-sensitive'")
    def nocapitalize(self):
        """
        Return whether this site's default title case is case-sensitive.

        DEPRECATED.
        """
        return self.siteinfo['case'] == 'case-sensitive'

    @property
    def throttle(self):
        """Return this Site's throttle. Initialize a new one if needed."""
        if not hasattr(self, "_throttle"):
            self._throttle = Throttle(self, multiplydelay=True)
        return self._throttle

    @property
    def family(self):
        """The Family object for this Site's wiki family."""
        return self.__family

    @property
    def code(self):
        """
        The identifying code for this Site equal to the wiki prefix.

        By convention, this is usually an ISO language code, but it does
        not have to be.
        """
        return self.__code

    @property
    def lang(self):
        """The ISO language code for this Site.

        Presumed to be equal to the site code, but this can be overridden.
        """
        return self.__code

    @property
    def doc_subpage(self):
        """
        Return the documentation subpage for this Site.

        @rtype: tuple
        """
        if not hasattr(self, '_doc_subpage'):
            try:
                doc, codes = self.family.doc_subpages.get('_default', ((), []))
                if self.code not in codes:
                    try:
                        doc = self.family.doc_subpages[self.code]
                    # Language not defined in doc_subpages in x_family.py file
                    # It will use default for the family.
                    # should it just raise an Exception and fail?
                    # this will help to check the dictionary ...
                    except KeyError:
                        warn(u"Site {0} has no language defined in "
                             u"doc_subpages dict in {1}_family.py file"
                             .format(self, self.family.name),
                             FamilyMaintenanceWarning, 2)
            # doc_subpages not defined in x_family.py file
            except AttributeError:
                doc = ()  # default
                warn(u"Site {0} has no doc_subpages dict in {1}_family.py file"
                     .format(self, self.family.name),
                     FamilyMaintenanceWarning, 2)
            self._doc_subpage = doc

        return self._doc_subpage

    def _cmpkey(self):
        """Perform equality and inequality tests on Site objects."""
        return (self.family.name, self.code)

    def __getstate__(self):
        """Remove Lock based classes before pickling."""
        new = self.__dict__.copy()
        del new['_pagemutex']
        if '_throttle' in new:
            del new['_throttle']
        # site cache contains exception information, which cant be pickled
        if '_iw_sites' in new:
            del new['_iw_sites']
        return new

    def __setstate__(self, attrs):
        """Restore things removed in __getstate__."""
        self.__dict__.update(attrs)
        self._pagemutex = threading.Lock()

    def user(self):
        """Return the currently-logged in bot user, or None."""
        if self.logged_in(True):
            return self._username[True]
        elif self.logged_in(False):
            return self._username[False]

    def username(self, sysop=False):
        """Return the username/sysopname used for the site."""
        return self._username[sysop]

    def __getattr__(self, attr):
        """Delegate undefined methods calls to the Family object."""
        if hasattr(self.__class__, attr):
            return getattr(self.__class__, attr)
        try:
            method = getattr(self.family, attr)
            # Bind the family method to this site's code.
            f = functools.partial(method, self.code)
            if hasattr(method, "__doc__"):
                f.__doc__ = method.__doc__
            return f
        except AttributeError:
            raise AttributeError("%s instance has no attribute '%s'"
                                 % (self.__class__.__name__, attr))

    def __str__(self):
        """Return string representing this Site's name and code."""
        return self.family.name + ':' + self.code

    @property
    def sitename(self):
        """String representing this Site's name and code."""
        return SelfCallString(self.__str__())

    def __repr__(self):
        """Return internal representation."""
        return '{0}("{1}", "{2}")'.format(
            self.__class__.__name__, self.code, self.family)

    def __hash__(self):
        """Return hashable key."""
        return hash(repr(self))

    def languages(self):
        """Return list of all valid language codes for this site's Family."""
        return list(self.family.langs.keys())

    def validLanguageLinks(self):
        """Return list of language codes to be used in interwiki links."""
        # Codes that collide with a namespace name cannot be interwiki links.
        return [lang for lang in self.languages()
                if self.namespaces.lookup_normalized_name(lang) is None]

    def _interwiki_urls(self, only_article_suffixes=False):
        """Yield the URL suffixes under which pages of this site appear."""
        base_path = self.path()
        if not only_article_suffixes:
            yield base_path
        yield base_path + '/'
        yield base_path + '?title='
        yield self.article_path

    def interwiki(self, prefix):
        """
        Return the site for a corresponding interwiki prefix.

        @raises SiteDefinitionError: if the url given in the interwiki table
            doesn't match any of the existing families.
        @raises KeyError: if the prefix is not an interwiki prefix.
        """
        return self._interwikimap[prefix].site

    def interwiki_prefix(self, site):
        """
        Return the interwiki prefixes going to that site.

        The interwiki prefixes are ordered first by length (shortest first)
        and then alphabetically. L{interwiki(prefix)} is not guaranteed to
        equal C{site} (i.e. the parameter passed to this function).

        @param site: The targeted site, which might be it's own.
        @type site: L{BaseSite}
        @return: The interwiki prefixes
        @rtype: list (guaranteed to be not empty)
        @raises KeyError: if there is no interwiki prefix for that site.
        """
        assert site is not None, 'Site must not be None'
        prefixes = set()
        for url in site._interwiki_urls():
            prefixes.update(self._interwikimap.get_by_url(url))
        if not prefixes:
            raise KeyError(
                u"There is no interwiki prefix to '{0}'".format(site))
        return sorted(prefixes, key=lambda p: (len(p), p))

    def local_interwiki(self, prefix):
        """
        Return whether the interwiki prefix is local.

        A local interwiki prefix is handled by the target site like a normal
        link. So if that link also contains an interwiki link it does follow
        it as long as it's a local link.

        @raises SiteDefinitionError: if the url given in the interwiki table
            doesn't match any of the existing families.
        @raises KeyError: if the prefix is not an interwiki prefix.
        """
        return self._interwikimap[prefix].local

    @deprecated('APISite.namespaces.lookup_name')
    def ns_index(self, namespace):
        """
        Return the Namespace for a given namespace name.

        @param namespace: name
        @type namespace: unicode
        @return: The matching Namespace object on this Site
        @rtype: Namespace, or None if invalid
        """
        return self.namespaces.lookup_name(namespace)

    @deprecated('APISite.namespaces.lookup_name')
    def getNamespaceIndex(self, namespace):
        """DEPRECATED: Return the Namespace for a given namespace name."""
        return self.namespaces.lookup_name(namespace)

    def _build_namespaces(self):
        """Create default namespaces."""
        use_image_name = MediaWikiVersion(
            self.version()) < MediaWikiVersion('1.14')
        return Namespace.builtin_namespaces(use_image_name)

    @property
    def namespaces(self):
        """Return dict of valid namespaces on this wiki."""
        if not hasattr(self, '_namespaces'):
            self._namespaces = NamespacesDict(self._build_namespaces())
        return self._namespaces

    def ns_normalize(self, value):
        """
        Return canonical local form of namespace name.

        @param value: A namespace name
        @type value: unicode
        """
        index = self.namespaces.lookup_name(value)
        return self.namespace(index)

    # for backwards-compatibility
    normalizeNamespace = redirect_func(ns_normalize,
                                       old_name='normalizeNamespace',
                                       class_name='BaseSite')

    @remove_last_args(('default', ))
    def redirect(self):
        """Return list of localized redirect tags for the site."""
        return [u"REDIRECT"]

    @remove_last_args(('default', ))
    def pagenamecodes(self):
        """Return list of localized PAGENAME tags for the site."""
        return [u"PAGENAME"]

    @remove_last_args(('default', ))
    def pagename2codes(self):
        """Return list of localized PAGENAMEE tags for the site."""
        return [u"PAGENAMEE"]

    def lock_page(self, page, block=True):
        """
        Lock page for writing. Must be called before writing any page.

        We don't want different threads trying to write to the same page
        at the same time, even to different sections.

        @param page: the page to be locked
        @type page: pywikibot.Page
        @param block: if true, wait until the page is available to be locked;
            otherwise, raise an exception if page can't be locked
        @raises PageInUse: the page is locked and block is False
        """
        title = page.title(withSection=False)
        self._pagemutex.acquire()
        try:
            while title in self._locked_pages:
                if not block:
                    raise PageInUse(title)
                # BUGFIX: the mutex must be released while waiting.
                # Previously the loop slept with the mutex held, so the
                # thread owning the page lock could never enter
                # unlock_page() (which needs the same mutex) -> deadlock.
                self._pagemutex.release()
                time.sleep(.25)
                self._pagemutex.acquire()
            self._locked_pages.append(title)
        finally:
            self._pagemutex.release()

    def unlock_page(self, page):
        """
        Unlock page. Call as soon as a write operation has completed.

        @param page: the page to be locked
        @type page: pywikibot.Page
        """
        self._pagemutex.acquire()
        try:
            self._locked_pages.remove(page.title(withSection=False))
        finally:
            self._pagemutex.release()

    def disambcategory(self):
        """Return Category in which disambig pages are listed."""
        if self.has_data_repository:
            repo = self.data_repository()
            repo_name = repo.family.name
            try:
                item = self.family.disambcatname[repo.code]
            except KeyError:
                raise Error(
                    'No {repo} qualifier found for disambiguation category '
                    'name in {fam}_family file'.format(repo=repo_name,
                                                       fam=self.family.name))
            else:
                dp = pywikibot.ItemPage(repo, item)
                try:
                    name = dp.getSitelink(self)
                except pywikibot.NoPage:
                    raise Error(
                        'No disambiguation category name found in {repo} '
                        'for {site}'.format(repo=repo_name, site=self))
        else:  # fallback for non WM sites
            try:
                name = '%s:%s' % (Namespace.CATEGORY,
                                  self.family.disambcatname[self.code])
            except KeyError:
                raise Error(
                    'No disambiguation category name found in '
                    '{site.family.name}_family for {site}'.format(site=self))
        return pywikibot.Category(pywikibot.Link(name, self))

    @deprecated("pywikibot.Link")
    def linkto(self, title, othersite=None):
        """DEPRECATED. Return a wikilink to a page.

        @param title: Title of the page to link to
        @type title: unicode
        @param othersite: Generate a interwiki link for use on this site.
        @type othersite: Site (optional)
        @rtype: unicode
        """
        return pywikibot.Link(title, self).astext(othersite)

    def isInterwikiLink(self, text):
        """Return True if text is in the form of an interwiki link.

        If a link object constructed using "text" as the link text parses as
        belonging to a different site, this method returns True.
        """
        linkfam, linkcode = pywikibot.Link(text, self).parse_site()
        return linkfam != self.family.name or linkcode != self.code

    def redirectRegex(self, pattern=None):
        """Return a compiled regular expression matching on redirect pages.

        Group 1 in the regex match object will be the target title.
        """
        if pattern is None:
            pattern = "REDIRECT"
        # A redirect starts with hash (#), followed by a keyword, then
        # arbitrary stuff, then a wikilink. The wikilink may contain
        # a label, although this is not useful.
        return re.compile(r'\s*#%(pattern)s\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]'
                          % {'pattern': pattern},
                          re.IGNORECASE | re.UNICODE | re.DOTALL)

    def sametitle(self, title1, title2):
        """
        Return True if title1 and title2 identify the same wiki page.

        title1 and title2 may be unequal but still identify the same page,
        if they use different aliases for the same namespace.
        """
        def ns_split(title):
            """Separate the namespace from the name."""
            ns, delim, name = title.partition(':')
            if delim:
                ns = self.namespaces.lookup_name(ns)
            if not delim or not ns:
                return default_ns, title
            else:
                return ns, name

        if title1 == title2:
            return True
        # Replace underscores with spaces and multiple combinations of them
        # with only one space
        title1 = re.sub(r'[_ ]+', ' ', title1)
        title2 = re.sub(r'[_ ]+', ' ', title2)
        if title1 == title2:
            return True
        default_ns = self.namespaces[0]
        # determine whether titles contain namespace prefixes
        ns1_obj, name1 = ns_split(title1)
        ns2_obj, name2 = ns_split(title2)
        if ns1_obj != ns2_obj:
            # pages in different namespaces
            return False
        name1 = name1.strip()
        name2 = name2.strip()
        # If the namespace has a case definition it's overriding the site's
        # case definition
        if ns1_obj.case == 'first-letter':
            name1 = first_upper(name1)
            name2 = first_upper(name2)
        return name1 == name2

    # namespace shortcuts for backwards-compatibility

    @deprecated('namespaces.SPECIAL.custom_name')
    def special_namespace(self):
        """Return local name for the Special: namespace."""
        return self.namespace(-1)

    @deprecated('namespaces.FILE.custom_name')
    def image_namespace(self):
        """Return local name for the File namespace."""
        return self.namespace(6)

    @deprecated('namespaces.MEDIAWIKI.custom_name')
    def mediawiki_namespace(self):
        """Return local name for the MediaWiki namespace."""
        return self.namespace(8)

    @deprecated('namespaces.TEMPLATE.custom_name')
    def template_namespace(self):
        """Return local name for the Template namespace."""
        return self.namespace(10)

    @deprecated('namespaces.CATEGORY.custom_name')
    def category_namespace(self):
        """Return local name for the Category namespace."""
        return self.namespace(14)

    @deprecated('list(namespaces.CATEGORY)')
    def category_namespaces(self):
        """Return names for the Category namespace."""
        return list(self.namespace(14, all=True))

    # site-specific formatting preferences

    def category_on_one_line(self):
        # TODO: is this even needed? No family in the framework uses it.
        """Return True if this site wants all category links on one line."""
        return self.code in self.family.category_on_one_line

    def interwiki_putfirst(self):
        """Return list of language codes for ordering of interwiki links."""
        return self.family.interwiki_putfirst.get(self.code, None)

    def getSite(self, code):
        """Return Site object for language 'code' in this Family."""
        return pywikibot.Site(code=code, fam=self.family, user=self.user())

    # deprecated methods for backwards-compatibility

    @deprecated("family attribute")
    def fam(self):
        """Return Family object for this Site."""
        return self.family

    @deprecated("pywikibot.data.api.encode_url")
    def urlEncode(self, query):
        """DEPRECATED."""
        return api.encode_url(query)

    @deprecated('pywikibot.data.api.Request or pywikibot.comms.http.request')
    @deprecated_args(compress=None, no_hostname=None, cookies_only=None,
                     refer=None, back_response=None)
    def getUrl(self, path, retry=None, sysop=None, data=None):
        """DEPRECATED.

        Retained for compatibility only. All arguments except path and data
        are ignored.
        """
        if retry is not None or sysop is not None:
            warn('APISite.getUrl parameters retry and sysop are not supported',
                 UserWarning)
        from pywikibot.comms import http
        if data:
            if not isinstance(data, basestring):
                data = urlencode(data)
            return http.request(self, path, method="PUT", body=data)
        else:
            return http.request(self, path)

    @deprecated
    def postForm(self, address, predata, sysop=False, cookies=None):
        """DEPRECATED."""
        return self.getUrl(address, data=predata)

    @deprecated
    def postData(self, address, data, contentType=None, sysop=False,
                 compress=True, cookies=None):
        """DEPRECATED."""
        return self.getUrl(address, data=data)

    @deprecated
    def checkCharset(self, charset):
        """DEPRECATED."""
        raise NotImplementedError

    @deprecated
    def cookies(self, sysop=False):
        """DEPRECATED."""
        raise NotImplementedError

    @deprecated
    def updateCookies(self, datas, sysop=False):
        """DEPRECATED."""
        raise NotImplementedError

    @deprecated
    def solveCaptcha(self, data):
        """DEPRECATED."""
        raise NotImplementedError
def must_be(group=None, right=None):
    """Decorator to require a certain user status when method is called.

    @param group: The group the logged in user should belong to.
        This parameter can be overridden by keyword argument 'as_group'.
    @type group: str ('user' or 'sysop')
    @param right: The rights the logged in user should have.
        Not supported yet and thus ignored.
    @return: method decorator
    """
    def decorator(fn):
        def callee(self, *args, **kwargs):
            # Obsolete sites must never be written to.
            if self.obsolete:
                raise UnknownSite("Language %s in family %s is obsolete"
                                  % (self.code, self.family.name))
            # The caller may override the required group per call.
            requested = kwargs.pop('as_group', group)
            if requested not in ('user', 'sysop'):
                raise Exception("Not implemented")
            # login(True) requests the sysop account, login(False) the
            # regular bot account.
            self.login(requested == 'sysop')
            return fn(self, *args, **kwargs)

        if not __debug__:
            return fn

        manage_wrapping(callee, fn)
        return callee

    return decorator
def need_version(version):
    """Decorator to require a certain MediaWiki version number.

    @param version: the mw version number required
    @type version: str
    @return: a decorator to make sure the requirement is satisfied when
        the decorated function is called.
    """
    def decorator(fn):
        def callee(self, *args, **kwargs):
            site_version = MediaWikiVersion(self.version())
            required = MediaWikiVersion(version)
            if site_version < required:
                raise NotImplementedError(
                    u'Method or function "%s"\n'
                    u"isn't implemented in MediaWiki version < %s"
                    % (fn.__name__, version))
            return fn(self, *args, **kwargs)

        if not __debug__:
            return fn

        manage_wrapping(callee, fn)
        return callee

    return decorator
def need_extension(extension):
    """Decorator to require a certain MediaWiki extension.

    @param extension: the MediaWiki extension required
    @type extension: unicode
    @return: a decorator to make sure the requirement is satisfied when
        the decorated function is called.
    """
    def decorator(fn):
        def callee(self, *args, **kwargs):
            # Refuse the call unless the wiki reports the extension.
            if self.has_extension(extension):
                return fn(self, *args, **kwargs)
            raise UnknownExtension(
                'Method "%s" is not implemented without the extension %s'
                % (fn.__name__, extension))

        if not __debug__:
            return fn

        manage_wrapping(callee, fn)
        return callee

    return decorator
class Siteinfo(Container):
"""
A 'dictionary' like container for siteinfo.
This class queries the server to get the requested siteinfo property.
Optionally it can cache this directly in the instance so that later
requests don't need to query the server.
All values of the siteinfo property 'general' are directly available.
"""
WARNING_REGEX = re.compile(r'^Unrecognized values? for parameter '
r'["\']siprop["\']: (.+?)\.?$')
# Until we get formatversion=2, we have to convert empty-string properties
# into booleans so they are easier to use.
BOOLEAN_PROPS = {
'general': [
'imagewhitelistenabled',
'langconversion',
'titleconversion',
'rtl',
'readonly',
'writeapi',
'variantarticlepath',
'misermode',
'uploadsenabled',
],
'namespaces': [ # for each namespace
'subpages',
'content',
'nonincludable',
],
'magicwords': [ # for each magicword
'case-sensitive',
],
}
def __init__(self, site):
"""Initialise it with an empty cache."""
self._site = site
self._cache = {}
@staticmethod
def _get_default(key):
"""
Return the default value for different properties.
If the property is 'restrictions' it returns a dictionary with:
- 'cascadinglevels': 'sysop'
- 'semiprotectedlevels': 'autoconfirmed'
- 'levels': '' (everybody), 'autoconfirmed', 'sysop'
- 'types': 'create', 'edit', 'move', 'upload'
Otherwise it returns L{pywikibot.tools.EMPTY_DEFAULT}.
@param key: The property name
@type key: str
@return: The default value
@rtype: dict or L{pywikibot.tools.EmptyDefault}
"""
if key == 'restrictions':
# implemented in b73b5883d486db0e9278ef16733551f28d9e096d
return {
'cascadinglevels': ['sysop'],
'semiprotectedlevels': ['autoconfirmed'],
'levels': ['', 'autoconfirmed', 'sysop'],
'types': ['create', 'edit', 'move', 'upload']
}
elif key == 'fileextensions':
# the default file extensions in MediaWiki
return [{'ext': ext} for ext in ['png', 'gif', 'jpg', 'jpeg']]
else:
return pywikibot.tools.EMPTY_DEFAULT
def _post_process(self, prop, data):
"""Do some default handling of data. Directly modifies data."""
# Be careful with version tests inside this here as it might need to
# query this method to actually get the version number
if prop == 'general':
if 'articlepath' not in data: # Introduced in 1.16.0
# Old version of MediaWiki, extract from base
path = urlparse(data['base'])[2].rsplit('/', 1)[0] + '/$1'
data['articlepath'] = path
# Convert boolean props from empty strings to actual boolean values
if prop in Siteinfo.BOOLEAN_PROPS.keys():
# siprop=namespaces and
# magicwords has properties per item in result
if prop == 'namespaces' or prop == 'magicwords':
for index, value in enumerate(data):
# namespaces uses a dict, while magicwords uses a list
key = index if type(data) is list else value
for p in Siteinfo.BOOLEAN_PROPS[prop]:
if p in data[key]:
data[key][p] = True
else:
data[key][p] = False
else:
for p in Siteinfo.BOOLEAN_PROPS[prop]:
if p in data:
data[p] = True
else:
data[p] = False
def _get_siteinfo(self, prop, expiry):
"""
Retrieve a siteinfo property.
All properties which the site doesn't
support contain the default value. Because pre-1.12 no data was
returned when a property doesn't exists, it queries each property
independetly if a property is invalid.
@param prop: The property names of the siteinfo.
@type prop: str or iterable
@param expiry: The expiry date of the cached request.
@type expiry: int (days), L{datetime.timedelta}, False (config)
@return: A dictionary with the properties of the site. Each entry in
the dictionary is a tuple of the value and a boolean to save if it
is the default value.
@rtype: dict (the values)
@see: U{https://www.mediawiki.org/wiki/API:Meta#siteinfo_.2F_si}
"""
def warn_handler(mod, message):
"""Return True if the warning is handled."""
matched = Siteinfo.WARNING_REGEX.match(message)
if mod == 'siteinfo' and matched:
invalid_properties.extend(
prop.strip() for prop in matched.group(1).split(','))
return True
else:
return False
if isinstance(prop, basestring):
props = [prop]
else:
props = prop
if len(props) == 0:
raise ValueError('At least one property name must be provided.')
invalid_properties = []
try:
request = self._site._request(
expiry=pywikibot.config.API_config_expiry
if expiry is False else expiry,
parameters={
'action': 'query', 'meta': 'siteinfo', 'siprop': props,
}
)
# With 1.25wmf5 it'll require continue or rawcontinue. As we don't
# continue anyway we just always use continue.
request['continue'] = True
# warnings are handled later
request._warning_handler = warn_handler
data = request.submit()
except api.APIError as e:
if e.code == 'siunknown_siprop':
if len(props) == 1:
pywikibot.log(
"Unable to get siprop '{0}'".format(props[0]))
return {props[0]: (Siteinfo._get_default(props[0]), False)}
else:
pywikibot.log(u"Unable to get siteinfo, because at least "
u"one property is unknown: '{0}'".format(
"', '".join(props)))
results = {}
for prop in props:
results.update(self._get_siteinfo(prop, expiry))
return results
else:
raise
else:
result = {}
if invalid_properties:
for prop in invalid_properties:
result[prop] = (Siteinfo._get_default(prop), False)
pywikibot.log(u"Unable to get siprop(s) '{0}'".format(
u"', '".join(invalid_properties)))
if 'query' in data:
cache_time = datetime.datetime.utcnow()
for prop in props:
if prop in data['query']:
self._post_process(prop, data['query'][prop])
result[prop] = (data['query'][prop], cache_time)
return result
@staticmethod
def _is_expired(cache_date, expire):
"""Return true if the cache date is expired."""
if expire is False: # can never expire
return False
elif not cache_date: # default values are always expired
return True
else:
# cached date + expiry are in the past if it's expired
return cache_date + expire < datetime.datetime.utcnow()
def _get_general(self, key, expiry):
"""
Return a siteinfo property which is loaded by default.
The property 'general' will be queried if it wasn't yet or it's forced.
Additionally all uncached default properties are queried. This way
multiple default properties are queried with one request. It'll cache
always all results.
@param key: The key to search for.
@type key: str
@param expiry: If the cache is older than the expiry it ignores the
cache and queries the server to get the newest value.
@type expiry: int (days), L{datetime.timedelta}, False (never)
@return: If that property was retrived via this method. Returns None if
the key was not in the retreived values.
@rtype: various (the value), bool (if the default value is used)
"""
if 'general' not in self._cache:
pywikibot.debug('general siteinfo not loaded yet.', _logger)
force = True
props = ['namespaces', 'namespacealiases']
else:
force = Siteinfo._is_expired(self._cache['general'][1], expiry)
props = []
if force:
props = [prop for prop in props if prop not in self._cache]
if props:
pywikibot.debug(
"Load siteinfo properties '{0}' along with 'general'"
.format("', '".join(props)), _logger)
props += ['general']
default_info = self._get_siteinfo(props, expiry)
for prop in props:
self._cache[prop] = default_info[prop]
if key in default_info:
return default_info[key]
if key in self._cache['general'][0]:
return self._cache['general'][0][key], self._cache['general']
else:
return None
def __getitem__(self, key):
"""Return a siteinfo property, caching and not forcing it."""
return self.get(key, False) # caches and doesn't force it
def get(self, key, get_default=True, cache=True, expiry=False):
"""
Return a siteinfo property.
It will never throw an APIError if it only stated, that the siteinfo
property doesn't exist. Instead it will use the default value.
@param key: The name of the siteinfo property.
@type key: str
@param get_default: Whether to throw an KeyError if the key is invalid.
@type get_default: bool
@param cache: Caches the result interally so that future accesses via
this method won't query the server.
@type cache: bool
@param expiry: If the cache is older than the expiry it ignores the
cache and queries the server to get the newest value.
@type expiry: int/float (days), L{datetime.timedelta}, False (never)
@return: The gathered property
@rtype: various
@raises KeyError: If the key is not a valid siteinfo property and the
get_default option is set to False.
@see: L{_get_siteinfo}
"""
# expire = 0 (or timedelta(0)) are always expired and their bool is
# False, so skip them EXCEPT if it's literally False, then they expire
# never: "expiry is False" is different than "not expiry"!
# if it's a int convert to timedelta
if expiry is not False and isinstance(expiry, (int, float)):
expiry = datetime.timedelta(expiry)
if expiry or expiry is False:
try:
cached = self._get_cached(key)
except KeyError:
cached = None
else: # cached value available
# is a default value, but isn't accepted
if not cached[1] and not get_default:
raise KeyError(key)
elif not Siteinfo._is_expired(cached[1], expiry):
return copy.deepcopy(cached[0])
preloaded = self._get_general(key, expiry)
if not preloaded:
preloaded = self._get_siteinfo(key, expiry)[key]
else:
cache = False
if not preloaded[1] and not get_default:
raise KeyError(key)
else:
if cache:
self._cache[key] = preloaded
return copy.deepcopy(preloaded[0])
    def _get_cached(self, key):
        """Return the cached value or a KeyError exception if not cached."""
        if 'general' in self._cache:
            # Properties of the 'general' siprop share the timestamp of
            # the whole 'general' cache entry.
            if key in self._cache['general'][0]:
                return (self._cache['general'][0][key],
                        self._cache['general'][1])
            else:
                return self._cache[key]
        # NOTE(review): when 'general' has never been cached, other cached
        # keys are ignored as well — presumably 'general' is always loaded
        # first; confirm before relying on this branch.
        raise KeyError(key)
def __contains__(self, key):
"""Return whether the value is cached."""
try:
self._get_cached(key)
except KeyError:
return False
else:
return True
def is_recognised(self, key):
"""Return if 'key' is a valid property name. 'None' if not cached."""
time = self.get_requested_time(key)
if time is None:
return None
else:
return bool(time)
def get_requested_time(self, key):
"""
Return when 'key' was successfully requested from the server.
If the property is actually in the siprop 'general' it returns the
last request from the 'general' siprop.
@param key: The siprop value or a property of 'general'.
@type key: basestring
@return: The last time the siprop of 'key' was requested.
@rtype: None (never), False (default), L{datetime.datetime} (cached)
"""
try:
return self._get_cached(key)[1]
except KeyError:
return None
def __call__(self, key='general', force=False, dump=False):
"""DEPRECATED: Return the entry for key or dump the complete cache."""
issue_deprecation_warning(
'Calling siteinfo', 'itself as a dictionary', 2
)
if not dump:
return self.get(key, expiry=0 if force else False)
else:
self.get(key, expiry=0 if force else False)
return self._cache
class TokenWallet(object):

    """Container for tokens."""

    def __init__(self, site):
        """Constructor."""
        self.site = site
        self._tokens = {}
        self.failed_cache = set()  # cache unavailable tokens.

    def load_tokens(self, types, all=False):
        """
        Preload one or multiple tokens.

        @param types: the types of token.
        @type types: iterable
        @param all: load all available tokens, if None only if it can be done
            in one request.
        @type all: bool
        """
        username = self.site.user()
        assert username, 'User must login in this site'
        self._tokens.setdefault(username, {}).update(
            self.site.get_tokens(types, all=all))

        # Preload all only the first time.
        # When all=True types is extended in site.get_tokens().
        # Keys not recognised as tokens, are cached so they are not requested
        # any longer.
        if all is not False:
            loaded = self._tokens[username]
            for token_type in types:
                if token_type not in loaded:
                    self.failed_cache.add((username, token_type))

    def __getitem__(self, key):
        """Get token value for the given key."""
        username = self.site.user()
        assert username, 'User must login in this site'

        user_tokens = self._tokens.setdefault(username, {})
        # Record the pre-validation key, so an invalid key is remembered.
        failed_cache_key = (username, key)

        try:
            key = self.site.validate_tokens([key])[0]
        except IndexError:
            raise Error(
                u"Requested token '{0}' is invalid on {1} wiki."
                .format(key, self.site))

        if (key not in user_tokens
                and failed_cache_key not in self.failed_cache):
            # always preload all for users without tokens
            self.load_tokens([key], all=False if user_tokens else None)

        try:
            return user_tokens[key]
        except KeyError:
            # token not allowed for self.site.user() on self.site
            self.failed_cache.add(failed_cache_key)
            # to be changed back to a plain KeyError?
            raise Error(
                u"Action '{0}' is not allowed for user {1} on {2} wiki."
                .format(key, username, self.site))

    def __contains__(self, key):
        """Return True if the given token name is cached."""
        return key in self._tokens.setdefault(self.site.user(), {})

    def __str__(self):
        """Return a str representation of the internal tokens dictionary."""
        return str(self._tokens)

    def __repr__(self):
        """Return a representation of the internal tokens dictionary."""
        return repr(self._tokens)
class RemovedSite(BaseSite):

    """Site removed from a family."""

    def __init__(self, code, fam, user=None, sysop=None):
        """Constructor.

        All parameters are forwarded unchanged to L{BaseSite}.
        """
        super(RemovedSite, self).__init__(code, fam, user, sysop)
class NonMWAPISite(BaseSite):

    """API interface to non MediaWiki sites."""

    def __init__(self, url):
        """Constructor.

        @param url: URL of the site; only the network location is kept.
        @type url: str
        """
        self.netloc = urlparse(url).netloc

    def __getattribute__(self, attr):
        """Return attribute if present else raise NotImplementedError."""
        allowed = ('__getattribute__', 'netloc')
        if attr not in allowed:
            raise NotImplementedError('The attribute %s has not been on '
                                      'site %s implemented yet.'
                                      % (attr, self.netloc))
        return super(NonMWAPISite, self).__getattribute__(attr)
class APISite(BaseSite):

    """
    API interface to MediaWiki site.

    The methods of this class communicate with the wiki through the
    MediaWiki action API.

    Do not instantiate directly; use pywikibot.Site function.
    """
    def __init__(self, code, fam=None, user=None, sysop=None):
        """Constructor."""
        BaseSite.__init__(self, code, fam, user, sysop)
        self._msgcache = {}  # cache of MediaWiki messages (name -> text)
        self._loginstatus = LoginStatus.NOT_ATTEMPTED
        self._siteinfo = Siteinfo(self)  # lazy siteinfo accessor
        self._paraminfo = api.ParamInfo(self)
        self._interwikimap = _InterwikiMap(self)
        self.tokens = TokenWallet(self)  # per-user token cache
def __getstate__(self):
"""Remove TokenWallet before pickling, for security reasons."""
new = super(APISite, self).__getstate__()
del new['tokens']
del new['_interwikimap']
return new
    def __setstate__(self, attrs):
        """Restore things removed in __getstate__."""
        super(APISite, self).__setstate__(attrs)
        # Recreate the attributes dropped by __getstate__.
        self._interwikimap = _InterwikiMap(self)
        self.tokens = TokenWallet(self)
@classmethod
def fromDBName(cls, dbname, site=None):
"""
Create a site from a database name using the sitematrix.
@param dbname: database name
@type dbname: str
@param site: Site to load sitematrix from. (Default meta.wikimedia.org)
@type site: APISite
@return: site object for the database name
@rtype: APISite
"""
# TODO this only works for some WMF sites
if not site:
site = pywikibot.Site('meta', 'meta')
req = site._request(expiry=datetime.timedelta(days=10),
parameters={'action': 'sitematrix'})
data = req.submit()
for key, val in data['sitematrix'].items():
if key == 'count':
continue
if 'code' in val:
lang = val['code']
for site in val['site']:
if site['dbname'] == dbname:
if site['code'] == 'wiki':
site['code'] = 'wikipedia'
return cls(lang, site['code'])
else:
for site in val:
if site['dbname'] == dbname:
return cls(site['code'], site['code'])
raise ValueError("Cannot parse a site out of %s." % dbname)
    @deprecated
    def has_api(self):
        """Return whether this site has an API. DEPRECATED.

        Always True for APISite instances.
        """
        return True
@deprecated_args(step=None)
def _generator(self, gen_class, type_arg=None, namespaces=None,
total=None, **args):
"""Convenience method that returns an API generator.
All generic keyword arguments are passed as MW API parameter except for
'g_content' which is passed as a normal parameter to the generator's
constructor.
@param gen_class: the type of generator to construct (must be
a subclass of pywikibot.data.api.QueryGenerator)
@param type_arg: query type argument to be passed to generator's
constructor unchanged (not all types require this)
@type type_arg: str
@param namespaces: if not None, limit the query to namespaces in this
list
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param total: if not None, limit the generator to yielding this many
items in total
@type total: int
@return: iterable with parameters set
@rtype: QueryGenerator
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
# TODO: Support parameters/simple modes?
req_args = {'site': self, 'parameters': args}
if 'g_content' in args:
req_args['g_content'] = args.pop('g_content')
if type_arg is not None:
gen = gen_class(type_arg, **req_args)
else:
gen = gen_class(**req_args)
if namespaces is not None:
gen.set_namespace(namespaces)
if total is not None and int(total) > 0:
gen.set_maximum_items(int(total))
return gen
def _request_class(self, kwargs):
"""
Get the appropriate class.
Inside this class kwargs use the parameters mode but QueryGenerator may
use the old kwargs mode.
"""
# This checks expiry in kwargs and not kwargs['parameters'] so it won't
# create a CachedRequest when there is an expiry in an API parameter
# and kwargs here are actually in parameters mode.
if 'expiry' in kwargs and kwargs['expiry'] is not None:
return api.CachedRequest
else:
return api.Request
def _request(self, **kwargs):
"""Create a request by forwarding all parameters directly."""
if 'expiry' in kwargs and kwargs['expiry'] is None:
del kwargs['expiry']
return self._request_class(kwargs)(site=self, **kwargs)
def _simple_request(self, **kwargs):
"""Create a request by defining all kwargs as parameters."""
return self._request_class({'parameters': kwargs}).create_simple(
site=self, **kwargs)
def logged_in(self, sysop=False):
"""Verify the bot is logged into the site as the expected user.
The expected usernames are those provided as either the user or sysop
parameter at instantiation.
@param sysop: if True, test if user is logged in as the sysop user
instead of the normal user.
@type sysop: bool
@rtype: bool
"""
if not hasattr(self, "_userinfo"):
return False
if sysop and 'sysop' not in self.userinfo['groups']:
return False
if 'name' not in self.userinfo or not self.userinfo['name']:
return False
if self.userinfo['name'] != self._username[sysop]:
return False
return True
@deprecated("Site.user()")
def loggedInAs(self, sysop=False):
"""Return the current username if logged in, otherwise return None.
DEPRECATED (use .user() method instead)
@param sysop: if True, test if user is logged in as the sysop user
instead of the normal user.
@type sysop: bool
@rtype: bool
"""
return self.logged_in(sysop) and self.user()
def is_oauth_token_available(self):
"""
Check whether OAuth token is set for this site.
@rtype: bool
"""
auth_token = get_authentication(self.base_url(''))
return auth_token is not None and len(auth_token) == 4
    def login(self, sysop=False):
        """
        Log the user in if not already logged in.

        @param sysop: if True, log in with the sysop account.
        @type sysop: bool

        U{https://www.mediawiki.org/wiki/API:Login}
        """
        # TODO: this should include an assert that loginstatus
        #       is not already IN_PROGRESS, however the
        #       login status may be left 'IN_PROGRESS' because
        #       of exceptions or if the first method of login
        #       (below) is successful.  Instead, log the problem,
        #       to be increased to 'warning' level once majority
        #       of issues are resolved.
        if self._loginstatus == LoginStatus.IN_PROGRESS:
            pywikibot.log(
                u'%r.login(%r) called when a previous login was in progress.'
                % (self, sysop)
            )
        # There are several ways that the site may already be
        # logged in, and we do not need to hit the server again.
        # logged_in() is False if _userinfo exists, which means this
        # will have no effect for the invocation from api.py
        if self.logged_in(sysop):
            self._loginstatus = (LoginStatus.AS_SYSOP
                                 if sysop else LoginStatus.AS_USER)
            return
        # check whether a login cookie already exists for this user
        # or check user identity when OAuth enabled
        self._loginstatus = LoginStatus.IN_PROGRESS
        try:
            self.getuserinfo(force=True)
            if self.userinfo['name'] == self._username[sysop] and \
               self.logged_in(sysop):
                return
        # May occur if you are not logged in (no API read permissions).
        except api.APIError:
            pass
        if self.is_oauth_token_available():
            # OAuth cannot change the logged-in identity, so a mismatch
            # between the expected and actual user is fatal here.
            if sysop:
                raise NoUsername('No sysop is permitted with OAuth')
            elif self.userinfo['name'] != self._username[sysop]:
                raise NoUsername('Logged in on %(site)s via OAuth as '
                                 '%(wrong)s, but expect as %(right)s'
                                 % {'site': self,
                                    'wrong': self.userinfo['name'],
                                    'right': self._username[sysop]})
            else:
                raise NoUsername('Logging in on %s via OAuth failed' % self)
        loginMan = api.LoginManager(site=self, sysop=sysop,
                                    user=self._username[sysop])
        if loginMan.login(retry=True):
            self._username[sysop] = loginMan.username
            self.getuserinfo(force=True)
            self._loginstatus = (LoginStatus.AS_SYSOP
                                 if sysop else LoginStatus.AS_USER)
        else:
            self._loginstatus = LoginStatus.NOT_LOGGED_IN  # failure
    # alias for backward-compatibility
    forceLogin = redirect_func(login, old_name='forceLogin',
                               class_name='APISite')

    def _relogin(self):
        """Force a login sequence without logging out, using the current user.

        This is an internal function which is used to re-login when
        the internal login state does not match the state we receive
        from the site.
        """
        del self._userinfo
        old_status = self._loginstatus
        self._loginstatus = LoginStatus.NOT_LOGGED_IN
        # NOTE(review): the previous LoginStatus value is passed where
        # login() expects its boolean 'sysop' flag — confirm that the
        # LoginStatus constants map onto the intended user/sysop choice.
        self.login(old_status)
    def logout(self):
        """
        Logout of the site and load details for the logged out user.

        Also logs out of the global account if linked to the user.

        U{https://www.mediawiki.org/wiki/API:Logout}

        @raises APIError: Logout is not available when OAuth enabled.
        """
        if self.is_oauth_token_available():
            # Only a warning is issued here; presumably the logout request
            # below fails server-side when OAuth is in use (see docstring).
            pywikibot.warning('Using OAuth suppresses logout function')
        uirequest = self._simple_request(action='logout')
        uirequest.submit()
        self._loginstatus = LoginStatus.NOT_LOGGED_IN
        # Refresh the cached user details to reflect the logged-out state.
        self.getuserinfo(force=True)
    def getuserinfo(self, force=False):
        """Retrieve userinfo from site and store in _userinfo attribute.

        self._userinfo will be a dict with the following keys and values:

          - id: user id (numeric str)
          - name: username (if user is logged in)
          - anon: present if user is not logged in
          - groups: list of groups (could be empty)
          - rights: list of rights (could be empty)
          - message: present if user has a new message on talk page
          - blockinfo: present if user is blocked (dict)

        U{https://www.mediawiki.org/wiki/API:Userinfo}

        @param force: force to retrieve userinfo ignoring cache
        @type force: bool
        """
        # Query the server only when no cached info exists or it is forced.
        if force or not hasattr(self, '_userinfo'):
            uirequest = self._simple_request(
                action="query",
                meta="userinfo",
                uiprop="blockinfo|hasmsg|groups|rights"
            )
            uidata = uirequest.submit()
            assert 'query' in uidata, \
                "API userinfo response lacks 'query' key"
            assert 'userinfo' in uidata['query'], \
                "API userinfo response lacks 'userinfo' key"
            self._userinfo = uidata['query']['userinfo']
        return self._userinfo

    # Read-only attribute-style access to the cached userinfo.
    userinfo = property(fget=getuserinfo, doc=getuserinfo.__doc__)
def getglobaluserinfo(self):
"""Retrieve globaluserinfo from site and cache it.
self._globaluserinfo will be a dict with the following keys and values:
- id: user id (numeric str)
- home: dbname of home wiki
- registration: registration date as Timestamp
- groups: list of groups (could be empty)
- rights: list of rights (could be empty)
- editcount: global editcount
"""
if not hasattr(self, "_globaluserinfo"):
uirequest = self._simple_request(
action="query",
meta="globaluserinfo",
guiprop="groups|rights|editcount"
)
uidata = uirequest.submit()
assert 'query' in uidata, \
"API userinfo response lacks 'query' key"
assert 'globaluserinfo' in uidata['query'], \
"API userinfo response lacks 'userinfo' key"
self._globaluserinfo = uidata['query']['globaluserinfo']
ts = self._globaluserinfo['registration']
iso_ts = pywikibot.Timestamp.fromISOformat(ts)
self._globaluserinfo['registration'] = iso_ts
return self._globaluserinfo
globaluserinfo = property(fget=getglobaluserinfo, doc=getuserinfo.__doc__)
    def is_blocked(self, sysop=False):
        """
        Return True when logged in user is blocked.

        To check whether a user can perform an action,
        the method has_right should be used.

        U{https://www.mediawiki.org/wiki/API:Userinfo}

        @param sysop: If true, log in to sysop account (if available)
        @type sysop: bool
        @rtype: bool
        """
        # Logging in populates self._userinfo with 'blockinfo' if blocked.
        if not self.logged_in(sysop):
            self.login(sysop)
        return 'blockinfo' in self._userinfo

    @deprecated('has_right() or is_blocked()')
    def checkBlocks(self, sysop=False):
        """
        Raise an exception when the user is blocked. DEPRECATED.

        @param sysop: If true, log in to sysop account (if available)
        @type sysop: bool
        @raises UserBlocked: The logged in user/sysop account is blocked.
        """
        if self.is_blocked(sysop):
            # User blocked
            raise UserBlocked('User is blocked in site %s' % self)
    def get_searched_namespaces(self, force=False):
        """
        Retrieve the default searched namespaces for the user.

        If no user is logged in, it returns the namespaces used by default.
        Otherwise it returns the user preferences. It caches the last result
        and returns it, if the username or login status hasn't changed.

        @param force: Whether the cache should be discarded.
        @return: The namespaces which are searched by default.
        @rtype: C{set} of L{Namespace}
        """
        # TODO: Integrate into _userinfo
        # Refresh when forced, never fetched, or the login identity changed.
        if (force or not hasattr(self, '_useroptions') or
                self.user() != self._useroptions['_name']):
            uirequest = self._simple_request(
                action="query",
                meta="userinfo",
                uiprop="options"
            )
            uidata = uirequest.submit()
            assert 'query' in uidata, \
                "API userinfo response lacks 'query' key"
            assert 'userinfo' in uidata['query'], \
                "API userinfo response lacks 'userinfo' key"
            self._useroptions = uidata['query']['userinfo']['options']
            # To determine if user name has changed
            self._useroptions['_name'] = (
                None if 'anon' in uidata['query']['userinfo'] else
                uidata['query']['userinfo']['name'])
        # The 'searchNs<id>' option may be '1' or True when enabled.
        return set(ns for ns in self.namespaces.values() if ns.id >= 0 and
                   self._useroptions['searchNs{0}'.format(ns.id)]
                   in ['1', True])
@property
def article_path(self):
"""Get the nice article path without $1."""
# Assert and remove the trailing $1 and assert that it'll end in /
assert self.siteinfo['general']['articlepath'].endswith('/$1'), \
'articlepath must end with /$1'
return self.siteinfo['general']['articlepath'][:-2]
def assert_valid_iter_params(self, msg_prefix, start, end, reverse):
"""Validate iterating API parameters."""
if reverse:
if end < start:
raise Error(msg_prefix +
': end must be later than start with reverse=True')
elif start < end:
raise Error(msg_prefix +
': start must be later than end with reverse=False')
    def has_right(self, right, sysop=False):
        """Return true if and only if the user has a specific right.

        Possible values of 'right' may vary depending on wiki settings,
        but will usually include:

        * Actions: edit, move, delete, protect, upload
        * User levels: autoconfirmed, sysop, bot

        U{https://www.mediawiki.org/wiki/API:Userinfo}

        @param right: the right to check, compared case-insensitively
        @type right: str
        @param sysop: if True, check the sysop account
        @type sysop: bool
        """
        # Logging in populates self._userinfo with the 'rights' list.
        if not self.logged_in(sysop):
            self.login(sysop)
        return right.lower() in self._userinfo['rights']

    def has_group(self, group, sysop=False):
        """Return true if and only if the user is a member of specified group.

        Possible values of 'group' may vary depending on wiki settings,
        but will usually include bot.

        U{https://www.mediawiki.org/wiki/API:Userinfo}

        @param group: the group to check, compared case-insensitively
        @type group: str
        @param sysop: if True, check the sysop account
        @type sysop: bool
        """
        if not self.logged_in(sysop):
            self.login(sysop)
        return group.lower() in self._userinfo['groups']

    def messages(self, sysop=False):
        """Return true if the user has new messages, and false otherwise."""
        # 'hasmsg' is present in userinfo when a talk page message waits.
        if not self.logged_in(sysop):
            self.login(sysop)
        return 'hasmsg' in self._userinfo
@need_extension('Echo')
def notifications(self, **kwargs):
"""Yield Notification objects from the Echo extension."""
params = {
'action': 'query',
'meta': 'notifications',
'notprop': 'list',
'notformat': 'text',
}
for key in kwargs:
params['not' + key] = kwargs[key]
data = self._simple_request(**params).submit()
notifications = data['query']['notifications']['list']
# Support API before 1.27.0-wmf.22
if hasattr(notifications, 'values'):
notifications = notifications.values()
for notification in notifications:
yield Notification.fromJSON(self, notification)
    @need_extension('Echo')
    def notifications_mark_read(self, **kwargs):
        """Mark selected notifications as read.

        Keyword arguments are forwarded to the 'echomarkread' API action.

        @return: whether the action was successful
        @rtype: bool
        """
        # TODO: ensure that the 'echomarkread' action
        # is supported by the site
        kwargs = merge_unique_dicts(kwargs, action='echomarkread',
                                    token=self.tokens['edit'])
        req = self._simple_request(**kwargs)
        data = req.submit()
        try:
            return data['query']['echomarkread']['result'] == 'success'
        except KeyError:
            # A missing result key means the action did not succeed.
            return False
    def mediawiki_messages(self, keys):
        """Fetch the text of a set of MediaWiki messages.

        If keys is '*' or ['*'], all messages will be fetched. (deprecated)

        The returned dict uses each key to store the associated message.

        @param keys: MediaWiki messages to fetch
        @type keys: set of str, '*' or ['*']
        @rtype dict
        @raises KeyError: a requested message is not defined on the site
        """
        if keys == '*' or keys == ['*']:
            issue_deprecation_warning('mediawiki_messages("*")',
                                      'specific messages', 2)
        # Query the server only when at least one key is not cached yet.
        if not all(_key in self._msgcache for _key in keys):
            parameters = {'meta': 'allmessages',
                          'ammessages': keys,
                          'amlang': self.lang,
                          }
            msg_query = api.QueryGenerator(site=self, parameters=parameters)
            for msg in msg_query:
                if 'missing' not in msg:
                    self._msgcache[msg['name']] = msg['*']
        # Return all messages
        if keys == u'*' or keys == [u'*']:
            return self._msgcache
        else:
            # Check requested keys
            for key in keys:
                if key not in self._msgcache:
                    raise KeyError("Site %s has no message '%s'"
                                   % (self, key))
            return dict((_key, self._msgcache[_key]) for _key in keys)
    @deprecated_args(forceReload=None)
    def mediawiki_message(self, key):
        """Fetch the text for a MediaWiki message.

        @param key: name of MediaWiki message
        @type key: str
        @rtype unicode
        @raises KeyError: the message is not defined on the site
        """
        return self.mediawiki_messages([key])[key]

    def has_mediawiki_message(self, key):
        """Determine if the site defines a MediaWiki message.

        @param key: name of MediaWiki message
        @type key: str
        @rtype: bool
        """
        return self.has_all_mediawiki_messages([key])
def has_all_mediawiki_messages(self, keys):
"""Confirm that the site defines a set of MediaWiki messages.
@param keys: names of MediaWiki messages
@type keys: set of str
@rtype: bool
"""
try:
self.mediawiki_messages(keys)
return True
except KeyError:
return False
@property
def months_names(self):
"""Obtain month names from the site messages.
The list is zero-indexed, ordered by month in calendar, and should
be in the original site language.
@return: list of tuples (month name, abbreviation)
@rtype: list
"""
if hasattr(self, "_months_names"):
return self._months_names
months_long = ['january', 'february', 'march',
'april', 'may_long', 'june',
'july', 'august', 'september',
'october', 'november', 'december']
months_short = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
months = self.mediawiki_messages(months_long + months_short)
self._months_names = []
for m_l, m_s in zip(months_long, months_short):
self._months_names.append((months[m_l], months[m_s]))
return self._months_names
    def list_to_text(self, args):
        """Convert a list of strings into human-readable text.

        The MediaWiki messages 'and' and 'word-separator' are used as separator
        between the last two arguments.
        If more than two arguments are given, other arguments are
        joined using MediaWiki message 'comma-separator'.

        @param args: text to be expanded
        @type args: iterable of unicode
        @rtype: unicode
        """
        NEEDED_MW_MESSAGES = ('and', 'comma-separator', 'word-separator')
        if not args:
            return u''
        if PY2 and any(isinstance(arg, str) for arg in args):
            issue_deprecation_warning('arg of type str', 'type unicode', 2)

        args = [unicode(e) for e in args]
        try:
            msgs = self.mediawiki_messages(NEEDED_MW_MESSAGES)
        except KeyError:
            raise NotImplementedError(
                'MediaWiki messages missing: {0}'.format(NEEDED_MW_MESSAGES))

        if MediaWikiVersion(self.version()) < MediaWikiVersion('1.16'):
            # Before MW 1.16 the messages contain raw HTML entities and the
            # 'and' message included its own leading comma.
            for key, value in msgs.items():
                if key == 'and' and value == ',&#32;and':
                    # v1.14 defined and as ',&#32;and'; fixed in v1.15
                    msgs['and'] = ' and'
                else:
                    msgs[key] = pywikibot.html2unicode(value)

        concat = msgs['and'] + msgs['word-separator']
        return msgs['comma-separator'].join(
            args[:-2] + [concat.join(args[-2:])])
@need_version("1.12")
@deprecated_args(string='text')
def expand_text(self, text, title=None, includecomments=None):
"""Parse the given text for preprocessing and rendering.
e.g expand templates and strip comments if includecomments
parameter is not True. Keeps text inside
<nowiki></nowiki> tags unchanges etc. Can be used to parse
magic parser words like {{CURRENTTIMESTAMP}}.
@param text: text to be expanded
@type text: unicode
@param title: page title without section
@type title: unicode
@param includecomments: if True do not strip comments
@type includecomments: bool
@rtype: unicode
"""
if not isinstance(text, basestring):
raise ValueError('text must be a string')
if not text:
return ''
req = self._simple_request(action='expandtemplates', text=text)
if title is not None:
req['title'] = title
if includecomments is True:
req['includecomments'] = u''
if MediaWikiVersion(self.version()) > MediaWikiVersion("1.24wmf7"):
key = 'wikitext'
req['prop'] = key
else:
key = '*'
return req.submit()['expandtemplates'][key]
getExpandedString = redirect_func(expand_text,
old_name='getExpandedString',
class_name='APISite')
    def getcurrenttimestamp(self):
        """
        Return the server time as a MediaWiki timestamp string.

        It calls L{server_time} first so it queries the server to get the
        current server time.

        @return: the server time
        @rtype: str (as 'yyyymmddhhmmss')
        """
        return self.server_time().totimestampformat()

    def server_time(self):
        """
        Return a Timestamp object representing the current server time.

        For wikis with a version newer than 1.16 it uses the 'time' property
        of the siteinfo 'general'. It'll force a reload before returning the
        time. It requests to expand the text '{{CURRENTTIMESTAMP}}' for older
        wikis.

        @return: the current server time
        @rtype: L{Timestamp}
        """
        if MediaWikiVersion(self.version()) >= MediaWikiVersion("1.16"):
            # expiry=0 forces a fresh siteinfo request.
            return pywikibot.Timestamp.fromISOformat(
                self.siteinfo.get('time', expiry=0))
        else:
            return pywikibot.Timestamp.fromtimestampformat(
                self.expand_text("{{CURRENTTIMESTAMP}}"))

    # alias for backward-compatibility
    getcurrenttime = redirect_func(server_time, old_name='getcurrenttime',
                                   class_name='APISite')
@need_version("1.14")
def getmagicwords(self, word):
"""Return list of localized "word" magic words for the site."""
if not hasattr(self, "_magicwords"):
magicwords = self.siteinfo.get("magicwords", cache=False)
self._magicwords = dict((item["name"], item["aliases"])
for item in magicwords)
if word in self._magicwords:
return self._magicwords[word]
else:
return [word]
    @deprecated('expand_text')
    def resolvemagicwords(self, wikitext):
        """
        Replace the {{ns:xx}} marks in a wikitext with the namespace names.

        DEPRECATED.
        """
        return self.expand_text(wikitext)

    @remove_last_args(('default', ))
    def redirect(self):
        """Return the localized #REDIRECT keyword."""
        # return the magic word without the preceding '#' character
        return self.getmagicwords("redirect")[0].lstrip("#")
def redirectRegex(self):
"""Return a compiled regular expression matching on redirect pages.
Group 1 in the regex match object will be the target title.
"""
# NOTE: this is needed, since the API can give false positives!
try:
keywords = set(s.lstrip("#")
for s in self.getmagicwords("redirect"))
keywords.add("REDIRECT") # just in case
pattern = "(?:" + "|".join(keywords) + ")"
except KeyError:
# no localized keyword for redirects
pattern = None
return BaseSite.redirectRegex(self, pattern)
    @remove_last_args(('default', ))
    def pagenamecodes(self):
        """Return list of localized PAGENAME tags for the site."""
        return self.getmagicwords("pagename")

    @remove_last_args(('default', ))
    def pagename2codes(self):
        """Return list of localized PAGENAMEE tags for the site."""
        return self.getmagicwords("pagenamee")
def _build_namespaces(self):
_namespaces = {}
# In MW 1.14, API siprop 'namespaces' added 'canonical',
# and Image became File with Image as an alias.
# For versions lower than 1.14, APISite needs to override
# the defaults defined in Namespace.
is_mw114 = MediaWikiVersion(self.version()) >= MediaWikiVersion('1.14')
for nsdata in self.siteinfo.get('namespaces', cache=False).values():
ns = nsdata.pop('id')
custom_name = None
canonical_name = None
if ns == 0:
canonical_name = nsdata.pop('*')
custom_name = canonical_name
else:
custom_name = nsdata.pop('*')
if is_mw114:
canonical_name = nsdata.pop('canonical')
default_case = Namespace.default_case(ns)
if 'case' not in nsdata:
nsdata['case'] = default_case or self.siteinfo['case']
elif default_case is not None:
assert default_case == nsdata['case'], \
'Default case is not consistent'
namespace = Namespace(ns, canonical_name, custom_name,
use_image_name=not is_mw114,
**nsdata)
_namespaces[ns] = namespace
for item in self.siteinfo.get('namespacealiases'):
ns = int(item['id'])
try:
namespace = _namespaces[ns]
except KeyError:
pywikibot.warning(
'Broken namespace alias "{0}" (id: {1}) on {2}'.format(
item['*'], item['id'], self))
if item['*'] not in namespace:
namespace.aliases.append(item['*'])
return _namespaces
@need_version("1.14")
@deprecated("has_extension")
def hasExtension(self, name, unknown=None):
"""Determine whether extension `name` is loaded.
Use L{has_extension} instead!
@param name: The extension to check for, case insensitive
@type name: str
@param unknown: Old parameter which shouldn't be used anymore.
@return: If the extension is loaded
@rtype: bool
"""
if unknown is not None:
pywikibot.debug(u'unknown argument of hasExtension is deprecated.',
_logger)
extensions = self.siteinfo['extensions']
name = name.lower()
for ext in extensions:
if ext['name'].lower() == name:
return True
return False
@need_version("1.14")
def has_extension(self, name):
"""Determine whether extension `name` is loaded.
@param name: The extension to check for, case sensitive
@type name: str
@return: If the extension is loaded
@rtype: bool
"""
extensions = self.siteinfo['extensions']
for ext in extensions:
if ext['name'] == name:
return True
return False
    @property
    def siteinfo(self):
        """Site information dict."""
        return self._siteinfo

    @deprecated('siteinfo or Namespace instance')
    def case(self):
        """Return this site's capitalization rule. DEPRECATED."""
        # This is the global setting via $wgCapitalLinks, it is used whenever
        # the namespaces don't propagate the namespace specific value.
        return self.siteinfo['case']

    def dbName(self):
        """Return this site's internal id."""
        return self.siteinfo['wikiid']

    @deprecated('APISite.lang')
    def language(self):
        """Return the code for the language of this Site. DEPRECATED."""
        return self.lang

    @property
    def lang(self):
        """Return the code for the language of this Site."""
        return self.siteinfo['lang']
    def version(self):
        """
        Return live project version number as a string.

        This overwrites the corresponding family method for APISite class. Use
        L{pywikibot.tools.MediaWikiVersion} to compare MediaWiki versions.
        """
        version = self.force_version()
        if not version:
            try:
                # 'generator' looks like 'MediaWiki 1.xx'; keep the version.
                version = self.siteinfo.get('generator',
                                            expiry=1).split(' ')[1]
            except pywikibot.data.api.APIError:
                # May occur if you are not logged in (no API read permissions).
                pywikibot.exception('You have no API read permissions. Seems '
                                    'you are not logged in')
                # Fall back to the statically configured family version.
                version = self.family.version(self.code)
        return version
    @property
    def has_image_repository(self):
        """Return True if site has a shared image repository like Commons."""
        code, fam = self.shared_image_repository()
        return bool(code or fam)

    @property
    def has_data_repository(self):
        """Return True if site has a shared data repository like Wikidata."""
        return self.data_repository() is not None

    @property
    @deprecated('has_data_repository')
    def has_transcluded_data(self):
        """Return True if site has a shared data repository. DEPRECATED."""
        return self.has_data_repository
def image_repository(self):
"""Return Site object for image repository e.g. commons."""
code, fam = self.shared_image_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username())
    def data_repository(self):
        """
        Return the data repository connected to this site.

        @return: The data repository if one is connected or None otherwise.
        @rtype: DataSite or None
        """
        def handle_warning(mod, warning):
            # Missing wikibase support shows up as this query warning;
            # treat exactly that warning as handled.
            return (mod == 'query' and re.match(
                r'Unrecognized value for parameter [\'"]meta[\'"]: wikibase',
                warning))

        req = self._simple_request(action='query', meta='wikibase')
        req._warning_handler = handle_warning
        data = req.submit()

        if 'query' in data and 'wikibase' in data['query']:
            # Build the repository URL from the reported base and scriptpath.
            data = data['query']['wikibase']['repo']['url']
            url = data['base'] + data['scriptpath'] + '/index.php'
            try:
                return pywikibot.Site(url=url, user=self.username(),
                                      interface='DataSite')
            except SiteDefinitionError as e:
                pywikibot.warning('Site "{0}" supports wikibase at "{1}", but '
                                  'creation failed: {2}.'.format(self, url, e))
                return None
        else:
            # No wikibase data: the handled warning must have occurred.
            assert 'warnings' in data
            return None
    def is_image_repository(self):
        """Return True if Site object is the image repository."""
        return self is self.image_repository()

    def is_data_repository(self):
        """Return True if its data repository is itself."""
        return self is self.data_repository()
    def page_from_repository(self, item):
        """
        Return a Page for this site object specified by wikibase item.

        @param item: id number of item, "Q###",
        @type item: str
        @return: Page, or Category object given by wikibase item number
            for this site object.
        @rtype: pywikibot.Page or None
        @raises UnknownExtension: site has no wikibase extension
        @raises NotImplementedError: method not implemented for a wikibase site
        """
        if not self.has_data_repository:
            raise UnknownExtension(
                'Wikibase is not implemented for {0}.'.format(self))
        if self.is_data_repository():
            raise NotImplementedError(
                'page_from_repository method is not implemented for '
                'Wikibase {0}.'.format(self))
        repo = self.data_repository()
        dp = pywikibot.ItemPage(repo, item)
        try:
            page_title = dp.getSitelink(self)
        except pywikibot.NoPage:
            # The item has no sitelink pointing to this site.
            return None
        page = pywikibot.Page(self, page_title)
        # Wrap category pages in a Category object.
        if page.namespace() == Namespace.CATEGORY:
            page = pywikibot.Category(page)
        return page
def nice_get_address(self, title):
"""Return shorter URL path to retrieve page titled 'title'."""
# 'title' is expected to be URL-encoded already
return self.siteinfo["articlepath"].replace("$1", title)
    @need_version('1.21')
    @need_extension('ProofreadPage')
    def _cache_proofreadinfo(self, expiry=False):
        """Retrieve proofreadinfo from site and cache response.

        Applicable only to sites with ProofreadPage extension installed.

        The following info is returned by the query and cached:
        - self._proofread_index_ns: Index Namespace
        - self._proofread_page_ns: Page Namespace
        - self._proofread_levels: a dictionary with:
            keys: int in the range [0, 1, ..., 4]
            values: category name corresponding to the 'key' quality level
            e.g. on en.wikisource:
            {0: u'Without text', 1: u'Not proofread', 2: u'Problematic',
             3: u'Proofread', 4: u'Validated'}

        @param expiry: either a number of days or a datetime.timedelta object
        @type expiry: int (days), L{datetime.timedelta}, False (config)
        @return: A tuple containing _proofread_index_ns,
            self._proofread_page_ns and self._proofread_levels.
        @rtype: Namespace, Namespace, dict
        """
        # All three attributes are set together below, so query the API only
        # when at least one of them is missing.
        if (not hasattr(self, '_proofread_index_ns') or
                not hasattr(self, '_proofread_page_ns') or
                not hasattr(self, '_proofread_levels')):
            pirequest = self._request(
                expiry=pywikibot.config.API_config_expiry
                if expiry is False else expiry,
                parameters={'action': 'query', 'meta': 'proofreadinfo',
                            'piprop': 'namespaces|qualitylevels'}
            )
            pidata = pirequest.submit()
            ns_id = pidata['query']['proofreadnamespaces']['index']['id']
            self._proofread_index_ns = self.namespaces[ns_id]
            ns_id = pidata['query']['proofreadnamespaces']['page']['id']
            self._proofread_page_ns = self.namespaces[ns_id]
            self._proofread_levels = {}
            for ql in pidata['query']['proofreadqualitylevels']:
                self._proofread_levels[ql['id']] = ql['category']
@property
def proofread_index_ns(self):
"""Return Index namespace for the ProofreadPage extension."""
if not hasattr(self, '_proofread_index_ns'):
self._cache_proofreadinfo()
return self._proofread_index_ns
@property
def proofread_page_ns(self):
"""Return Page namespace for the ProofreadPage extension."""
if not hasattr(self, '_proofread_page_ns'):
self._cache_proofreadinfo()
return self._proofread_page_ns
@property
def proofread_levels(self):
"""Return Quality Levels for the ProofreadPage extension."""
if not hasattr(self, '_proofread_levels'):
self._cache_proofreadinfo()
return self._proofread_levels
def namespace(self, num, all=False):
"""Return string containing local name of namespace 'num'.
If optional argument 'all' is true, return all recognized
values for this namespace.
@param num: Namespace constant.
@type num: int
@param all: If True return a Namespace object. Otherwise
return the namespace name.
@return: local name or Namespace object
@rtype: str or Namespace
"""
if all:
return self.namespaces[num]
return self.namespaces[num][0]
@deprecated("version()")
def live_version(self, force=False):
"""Return the 'real' version number found on [[Special:Version]].
By default the version number is cached for one day.
@param force: If the version should be read always from the server and
never from the cache.
@type force: bool
@return: A tuple containing the major, minor version number and any
text after that. If an error occurred (0, 0, 0) is returned.
@rtype: int, int, str
"""
try:
versionstring = self.siteinfo.get('generator',
expiry=0 if force else 1)
m = re.match(r"^MediaWiki ([0-9]+)\.([0-9]+)(.*)$", versionstring)
if m:
return (int(m.group(1)), int(m.group(2)), m.group(3))
# May occur if you are not logged in (no API read permissions).
except api.APIError:
return (0, 0, 0)
def _update_page(self, page, query):
for pageitem in query:
if not self.sametitle(pageitem['title'],
page.title(withSection=False)):
raise InconsistentTitleReceived(page, pageitem['title'])
api.update_page(page, pageitem, query.props)
def loadpageinfo(self, page, preload=False):
"""Load page info from api and store in page attributes."""
title = page.title(withSection=False)
inprop = 'protection'
if preload:
inprop += '|preload'
query = self._generator(api.PropertyGenerator,
type_arg="info",
titles=title.encode(self.encoding()),
inprop=inprop)
self._update_page(page, query)
def loadcoordinfo(self, page):
"""Load [[mw:Extension:GeoData]] info."""
title = page.title(withSection=False)
query = self._generator(api.PropertyGenerator,
type_arg="coordinates",
titles=title.encode(self.encoding()),
coprop=['type', 'name', 'dim',
'country', 'region',
'globe'],
coprimary='all')
self._update_page(page, query)
    @need_extension('PageImages')
    def loadpageimage(self, page):
        """
        Load [[mw:Extension:PageImages]] info.

        @param page: The page for which to obtain the image
        @type page: Page class
        @raises APIError: PageImages extension is not installed
        """
        title = page.title(withSection=False)
        # piprop=['name'] requests only the page image's file name.
        query = self._generator(api.PropertyGenerator,
                                type_arg='pageimages',
                                titles=title.encode(self.encoding()),
                                piprop=['name'])
        self._update_page(page, query)
def loadpageprops(self, page):
"""Load page props for the given page."""
title = page.title(withSection=False)
query = self._generator(api.PropertyGenerator,
type_arg="pageprops",
titles=title.encode(self.encoding()),
)
self._update_page(page, query)
    def loadimageinfo(self, page, history=False,
                      url_width=None, url_height=None, url_param=None):
        """Load image info from api and save in page attributes.

        Parameters correspond to iiprops in:
        [1] U{https://www.mediawiki.org/wiki/API:Imageinfo}

        Parameters validation and error handling left to the API call.

        @param history: if true, return the image's version history
        @param url_width: see iiurlwidth in [1]
        @param url_height: see iiurlheigth in [1]
        @param url_param: see iiurlparam in [1]
        @return: a list of imageinfo dicts if history is true, otherwise the
            latest imageinfo dict only
        @raises NoPage: the file page is missing
        @raises PageRelatedError: the query returned no imageinfo
        """
        title = page.title(withSection=False)
        args = {'titles': title,
                'iiurlwidth': url_width,
                'iiurlheight': url_height,
                'iiurlparam': url_param,
                }
        if not history:
            # Only the latest revision of the file info is wanted.
            args["total"] = 1
        query = self._generator(api.PropertyGenerator,
                                type_arg="imageinfo",
                                iiprop=["timestamp", "user", "comment",
                                        "url", "size", "sha1", "mime",
                                        "metadata", "archivename"],
                                **args)
        # kept for backward compatibility
        # TODO: when backward compatibility can be broken, adopt
        # self._update_page() pattern and remove return
        for pageitem in query:
            if not self.sametitle(pageitem['title'], title):
                raise InconsistentTitleReceived(page, pageitem['title'])
            api.update_page(page, pageitem, query.props)
            if "imageinfo" not in pageitem:
                if "missing" in pageitem:
                    raise NoPage(page)
                raise PageRelatedError(
                    page,
                    u"loadimageinfo: Query on %s returned no imageinfo")
        # 'pageitem' is the last item produced by the loop above.
        return (pageitem['imageinfo']
                if history else pageitem['imageinfo'][0])
    @deprecated('Check the content model instead')
    def loadflowinfo(self, page):
        """
        Load Flow-related information about a given page.

        Assumes that the Flow extension is installed.

        @param page: the page for which Flow info is loaded
        @raises APIError: Flow extension is not installed
        """
        title = page.title(withSection=False)
        query = self._generator(api.PropertyGenerator,
                                type_arg="flowinfo",
                                titles=title.encode(self.encoding()),
                                )
        self._update_page(page, query)
def page_exists(self, page):
"""Return True if and only if page is an existing page on site."""
return page.pageid > 0
def page_restrictions(self, page):
"""Return a dictionary reflecting page protections."""
if not self.page_exists(page):
raise NoPage(page)
if not hasattr(page, "_protection"):
self.loadpageinfo(page)
return page._protection
def page_can_be_edited(self, page):
"""
Determine if the page can be edited.
Return True if and only if:
- page is unprotected, and bot has an account for this site, or
- page is protected, and bot has a sysop account for this site.
@rtype: bool
"""
rest = self.page_restrictions(page)
sysop_protected = "edit" in rest and rest['edit'][0] == 'sysop'
try:
api.LoginManager(site=self, sysop=sysop_protected)
except NoUsername:
return False
return True
    def page_isredirect(self, page):
        """Return True if and only if page is a redirect."""
        if not hasattr(page, "_isredir"):
            # Set a safe default *before* loading, so the attribute exists
            # even if loadpageinfo() does not set it.
            page._isredir = False  # bug T56684
            self.loadpageinfo(page)
        return page._isredir
    def getredirtarget(self, page):
        """
        Return page object for the redirect target of page.

        @param page: page to search redirects for
        @type page: BasePage
        @return: redirect target of page
        @rtype: BasePage
        @raises IsNotRedirectPage: page is not a redirect
        @raises RuntimeError: no redirects found
        @raises CircularRedirect: page is a circular redirect
        @raises InterwikiRedirectPage: the redirect target is
            on another site
        """
        if not self.page_isredirect(page):
            raise IsNotRedirectPage(page)
        # Return the cached target, if it was resolved before.
        if hasattr(page, '_redirtarget'):
            return page._redirtarget
        title = page.title(withSection=False)
        query = self._simple_request(
            action='query',
            prop='info',
            titles=title,
            redirects=True)
        result = query.submit()
        if "query" not in result or "redirects" not in result["query"]:
            raise RuntimeError(
                "getredirtarget: No 'redirects' found for page %s."
                % title.encode(self.encoding()))
        # Map each redirect source title to its target title and optional
        # '#fragment' section.
        redirmap = dict((item['from'],
                         {'title': item['to'],
                          'section': u'#' + item['tofragment']
                          if 'tofragment' in item and item['tofragment']
                          else ''})
                        for item in result['query']['redirects'])
        # Normalize title
        for item in result['query'].get('normalized', []):
            if item['from'] == title:
                title = item['to']
                break
        if title not in redirmap:
            raise RuntimeError(
                "getredirtarget: 'redirects' contains no key for page %s."
                % title.encode(self.encoding()))
        target_title = u'%(title)s%(section)s' % redirmap[title]
        if self.sametitle(title, target_title):
            raise CircularRedirect(page)
        if "pages" not in result['query']:
            # No "pages" element might indicate a circular redirect
            # Check that a "to" link is also a "from" link in redirmap
            for _from, _to in redirmap.items():
                if _to['title'] in redirmap:
                    raise CircularRedirect(page)
            else:
                # for/else: no circular link was found among the redirects.
                target = pywikibot.Page(source=page.site, title=target_title)
                # Check if target is on another site.
                if target.site != page.site:
                    raise InterwikiRedirectPage(page, target)
                else:
                    # Redirect to Special: & Media: pages, which do not work
                    # like redirects, but are rendered like a redirect.
                    page._redirtarget = target
                    return page._redirtarget
        pagedata = list(result['query']['pages'].values())[0]
        # There should be only one value in 'pages' (the ultimate
        # target, also in case of double redirects).
        if self.sametitle(pagedata['title'], target_title):
            # target_title is the ultimate target
            target = pywikibot.Page(self, pagedata['title'], pagedata['ns'])
            api.update_page(target, pagedata, ['info'])
            page._redirtarget = target
        else:
            # Target is an intermediate redirect -> double redirect.
            # Do not bypass double-redirects and return the ultimate target;
            # it would be impossible to detect and fix double-redirects.
            # This handles also redirects to sections, as sametitle()
            # does not ignore sections.
            target = pywikibot.Page(self, target_title)
            page._redirtarget = target
        return page._redirtarget
    def load_pages_from_pageids(self, pageids):
        """
        Return a page generator from pageids.

        Pages are iterated in the same order than in the underlying pageids.
        Pageids are filtered and only one page is returned in case of
        duplicate pageids.

        @param pageids: an iterable that returns pageids (str or int),
            or a comma- or pipe-separated string of pageids
            (e.g. '945097,1483753, 956608' or '945097|483753|956608')
        """
        if not pageids:
            return
        if isinstance(pageids, basestring):
            pageids = pageids.replace('|', ',')
            pageids = pageids.split(',')
            pageids = [p.strip() for p in pageids]
        # Validate pageids.
        gen = (str(int(p)) for p in pageids if int(p) > 0)
        # Find out how many pages can be specified at a time.
        parameter = self._paraminfo.parameter('query+info', 'prop')
        if self.logged_in() and self.has_right('apihighlimits'):
            groupsize = int(parameter['highlimit'])
        else:
            groupsize = int(parameter['limit'])
        for sublist in itergroup(filter_unique(gen), groupsize):
            # Store the order of the input data.
            priority_dict = dict(zip(sublist, range(len(sublist))))
            prio_queue = []
            next_prio = 0
            params = {'pageids': sublist, }
            rvgen = api.PropertyGenerator('info', site=self, parameters=params)
            # The API may answer out of order; a min-heap keyed by the input
            # position restores the original ordering.
            for pagedata in rvgen:
                title = pagedata['title']
                pageid = str(pagedata['pageid'])
                page = pywikibot.Page(pywikibot.Link(title, source=self))
                api.update_page(page, pagedata)
                priority, page = heapq.heappushpop(prio_queue,
                                                   (priority_dict[pageid],
                                                    page))
                # Smallest priority matches expected one; yield early.
                if priority == next_prio:
                    yield page
                    next_prio += 1
                else:
                    # Push onto the heap.
                    heapq.heappush(prio_queue, (priority, page))
            # Extract data in the same order of the input data.
            while prio_queue:
                priority, page = heapq.heappop(prio_queue)
                yield page
    def preloadpages(self, pagelist, groupsize=50, templates=False,
                     langlinks=False, pageprops=False):
        """Return a generator to a list of preloaded pages.

        Pages are iterated in the same order than in the underlying pagelist.
        In case of duplicates in a groupsize batch, return the first entry.

        @param pagelist: an iterable that returns Page objects
        @param groupsize: how many Pages to query at a time
        @type groupsize: int
        @param templates: preload pages (typically templates) transcluded in
            the provided pages
        @type templates: bool
        @param langlinks: preload all language links from the provided pages
            to other languages
        @type langlinks: bool
        @param pageprops: preload various properties defined in page content
        @type pageprops: bool
        """
        props = 'revisions|info|categoryinfo'
        if templates:
            props += '|templates'
        if langlinks:
            props += '|langlinks'
        if pageprops:
            props += '|pageprops'
        rvprop = ['ids', 'flags', 'timestamp', 'user', 'comment', 'content']
        for sublist in itergroup(pagelist, groupsize):
            # Do not use p.pageid property as it will force page loading.
            pageids = [str(p._pageid) for p in sublist
                       if hasattr(p, "_pageid") and p._pageid > 0]
            cache = {}
            # In case of duplicates, return the first entry.
            for priority, page in enumerate(sublist):
                try:
                    cache.setdefault(page.title(withSection=False),
                                     (priority, page))
                except pywikibot.InvalidTitle:
                    pywikibot.exception()
            prio_queue = []
            next_prio = 0
            rvgen = api.PropertyGenerator(props, site=self)
            rvgen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
            if len(pageids) == len(sublist):
                # only use pageids if all pages have them
                rvgen.request['pageids'] = set(pageids)
            else:
                rvgen.request['titles'] = list(cache.keys())
            rvgen.request['rvprop'] = rvprop
            pywikibot.output(u"Retrieving %s pages from %s."
                             % (len(cache), self))
            # A min-heap keyed by input position restores the original
            # ordering of possibly out-of-order API responses.
            for pagedata in rvgen:
                pywikibot.debug(u"Preloading %s" % pagedata, _logger)
                try:
                    if pagedata['title'] not in cache:
                        # API always returns a "normalized" title which is
                        # usually the same as the canonical form returned by
                        # page.title(), but sometimes not (e.g.,
                        # gender-specific localizations of "User" namespace).
                        # This checks to see if there is a normalized title in
                        # the response that corresponds to the canonical form
                        # used in the query.
                        for key in cache:
                            if self.sametitle(key, pagedata['title']):
                                cache[pagedata['title']] = cache[key]
                                break
                        else:
                            pywikibot.warning(
                                u"preloadpages: Query returned unexpected "
                                u"title '%s'" % pagedata['title'])
                            continue
                except KeyError:
                    pywikibot.debug(u"No 'title' in %s" % pagedata, _logger)
                    pywikibot.debug(u"pageids=%s" % pageids, _logger)
                    pywikibot.debug(u"titles=%s" % list(cache.keys()), _logger)
                    continue
                priority, page = cache[pagedata['title']]
                api.update_page(page, pagedata, rvgen.props)
                priority, page = heapq.heappushpop(prio_queue,
                                                   (priority, page))
                # Smallest priority matches expected one; yield.
                if priority == next_prio:
                    yield page
                    next_prio += 1
                else:
                    # Push back onto the heap.
                    heapq.heappush(prio_queue, (priority, page))
            # Empty the heap.
            while prio_queue:
                priority, page = heapq.heappop(prio_queue)
                yield page
    def validate_tokens(self, types):
        """Validate if requested tokens are acceptable.

        Valid tokens depend on mw version.

        @param types: requested token types
        @type types: iterable of str
        @return: the subset of types valid for this MediaWiki version; on
            1.24wmf19+ 'csrf' is substituted for legacy token types.
        @rtype: list
        """
        _version = MediaWikiVersion(self.version())
        if _version < MediaWikiVersion('1.20'):
            types_wiki = self._paraminfo.parameter('query+info',
                                                   'token')['type']
            types_wiki.append('patrol')
            valid_types = [token for token in types if token in types_wiki]
            # Pre 1.17, preload token was the same as the edit token.
            if _version < MediaWikiVersion('1.17'):
                if 'patrol' in types and 'edit' not in valid_types:
                    valid_types.append('edit')
        elif _version < MediaWikiVersion('1.24wmf19'):
            types_wiki = self._paraminfo.parameter('tokens',
                                                   'type')['type']
            valid_types = [token for token in types if token in types_wiki]
        else:
            types_wiki_old = self._paraminfo.parameter('query+info',
                                                       'token')['type']
            types_wiki_action = self._paraminfo.parameter('tokens',
                                                          'type')['type']
            types_wiki = self._paraminfo.parameter('query+tokens',
                                                   'type')['type']
            valid_types = [token for token in types if token in types_wiki]
            # Legacy token names were merged into 'csrf' in 1.24wmf19.
            for token in types:
                if (token not in valid_types and
                        (token in types_wiki_old or
                         token in types_wiki_action)):
                    valid_types.append('csrf')
        return valid_types
    def get_tokens(self, types, all=False):
        """Preload one or multiple tokens.

        For all MediaWiki versions prior to 1.20, only one token can be
        retrieved at once.
        For MediaWiki versions since 1.24wmfXXX a new token
        system was introduced which reduced the amount of tokens available.
        Most of them were merged into the 'csrf' token. If the token type in
        the parameter is not known it will default to the 'csrf' token.

        The other token types available are:
        - deleteglobalaccount
        - patrol (*)
        - rollback
        - setglobalaccountstatus
        - userrights
        - watch

        (*) Patrol was added in v1.14.
            Until v1.16, the patrol token is same as the edit token.
            For v1.17-19, the patrol token must be obtained from the query
            list recentchanges.

        @param types: the types of token (e.g., "edit", "move", "delete");
            see API documentation for full list of types
        @type types: iterable
        @param all: load all available tokens, if None only if it can be done
            in one request.
        @type all: bool
        @return: a dict with retrieved valid tokens.
        @rtype: dict
        """
        def warn_handler(mod, text):
            """Filter warnings for not available tokens."""
            return re.match(
                r'Action \'\w+\' is not allowed for the current user', text)
        user_tokens = {}
        _version = MediaWikiVersion(self.version())
        if _version < MediaWikiVersion('1.20'):
            if all:
                types_wiki = self._paraminfo.parameter('query+info',
                                                       'token')['type']
                types.extend(types_wiki)
            valid_tokens = set(self.validate_tokens(types))
            # don't request patrol
            query = api.PropertyGenerator(
                'info',
                titles='Dummy page',
                intoken=valid_tokens - set(['patrol']),
                site=self)
            query.request._warning_handler = warn_handler
            for item in query:
                pywikibot.debug(unicode(item), _logger)
                for tokentype in valid_tokens:
                    if (tokentype + 'token') in item:
                        user_tokens[tokentype] = item[tokentype + 'token']
            # patrol token require special handling.
            # TODO: try to catch exceptions?
            if 'patrol' in valid_tokens:
                if MediaWikiVersion(
                        '1.14') <= _version < MediaWikiVersion('1.17'):
                    # Until 1.16 the patrol token equals the edit token.
                    if 'edit' in user_tokens:
                        user_tokens['patrol'] = user_tokens['edit']
                else:
                    # 1.17-1.19: fetch patrol token via list=recentchanges.
                    req = self._simple_request(action='query',
                                               list='recentchanges',
                                               rctoken='patrol', rclimit=1)
                    req._warning_handler = warn_handler
                    data = req.submit()
                    if 'query' in data:
                        data = data['query']
                    if 'recentchanges' in data:
                        item = data['recentchanges'][0]
                        pywikibot.debug(unicode(item), _logger)
                        if 'patroltoken' in item:
                            user_tokens['patrol'] = item['patroltoken']
        else:
            if _version < MediaWikiVersion('1.24wmf19'):
                if all is not False:
                    types_wiki = self._paraminfo.parameter('tokens',
                                                           'type')['type']
                    types.extend(types_wiki)
                req = self._simple_request(action='tokens',
                                           type=self.validate_tokens(types))
            else:
                if all is not False:
                    types_wiki = self._paraminfo.parameter('query+tokens',
                                                           'type')['type']
                    types.extend(types_wiki)
                req = self._simple_request(action='query', meta='tokens',
                                           type=self.validate_tokens(types))
            req._warning_handler = warn_handler
            data = req.submit()
            if 'query' in data:
                data = data['query']
            if 'tokens' in data and data['tokens']:
                # Drop the trailing 'token' from each key; a value of '+\\'
                # marks an invalid (anonymous) token.
                user_tokens = dict((key[:-5], val)
                                   for key, val in data['tokens'].items()
                                   if val != '+\\')
        return user_tokens
    @deprecated("the 'tokens' property")
    def token(self, page, tokentype):
        """Return token retrieved from wiki to allow changing page content.

        @param page: the Page for which a token should be retrieved
        @param tokentype: the type of token (e.g., "edit", "move", "delete");
            see API documentation for full list of types
        """
        # NOTE(review): 'page' is not used here; the token is looked up by
        # type only.
        return self.tokens[tokentype]
    @deprecated("the 'tokens' property")
    def getToken(self, getalways=True, getagain=False, sysop=False):
        """DEPRECATED: Get edit token.

        @param getalways: must be True; False is not supported in core
        @param getagain: if True, invalidate the cached token first
        @param sysop: if True, use the sysop account's token
        @raises ValueError: getalways is False, or the token would belong to
            a user other than the currently logged-in one
        """
        if self.username(sysop) != self.user():
            raise ValueError('The token for {0} was requested but only the '
                             'token for {1} can be retrieved.'.format(
                                 self.username(sysop), self.user()))
        if not getalways:
            raise ValueError('In pywikibot/core getToken does not support the '
                             'getalways parameter.')
        token = self.validate_tokens(['edit'])[0]
        if getagain and token in self.tokens:
            # invalidate token
            del self.tokens._tokens[self.user()][token]
        return self.tokens[token]
    @deprecated("the 'tokens' property")
    def getPatrolToken(self, sysop=False):
        """DEPRECATED: Get patrol token.

        @param sysop: if True, use the sysop account's token
        @raises ValueError: the token would belong to a user other than the
            currently logged-in one
        """
        if self.username(sysop) != self.user():
            raise ValueError('The token for {0} was requested but only the '
                             'token for {1} can be retrieved.'.format(
                                 self.username(sysop), self.user()))
        return self.tokens['patrol']
def getParsedString(self, string, keeptags=None):
"""Deprecated.
compat defined keeptags as ['*'].
"""
return NotImplementedError
# following group of methods map more-or-less directly to API queries
    def pagebacklinks(self, page, followRedirects=False, filterRedirects=None,
                      namespaces=None, total=None, content=False):
        """Iterate all pages that link to the given page.

        @param page: The Page to get links to.
        @param followRedirects: Also return links to redirects pointing to
            the given page.
        @param filterRedirects: If True, only return redirects to the given
            page. If False, only return non-redirect links. If None, return
            both (no filtering).
        @param namespaces: If present, only return links from the namespaces
            in this list.
        @type namespaces: iterable of basestring or Namespace key,
            or a single instance of those types. May be a '|' separated
            list of namespace identifiers.
        @param total: Maximum number of pages to retrieve in total.
        @param content: if True, load the current content of each iterated
            page (default False)
        @raises KeyError: a namespace identifier was not resolved
        @raises TypeError: a namespace identifier has an inappropriate
            type such as NoneType or bool
        """
        bltitle = page.title(withSection=False).encode(self.encoding())
        blargs = {"gbltitle": bltitle}
        if filterRedirects is not None:
            blargs["gblfilterredir"] = (filterRedirects and "redirects" or
                                        "nonredirects")
        blgen = self._generator(api.PageGenerator, type_arg="backlinks",
                                namespaces=namespaces, total=total,
                                g_content=content, **blargs)
        if followRedirects:
            # links identified by MediaWiki as redirects may not really be,
            # so we have to check each "redirect" page and see if it
            # really redirects to this page
            # see fixed MediaWiki bug T9304
            redirgen = self._generator(api.PageGenerator,
                                       type_arg="backlinks",
                                       gbltitle=bltitle,
                                       gblfilterredir="redirects")
            # Chain direct backlinks with the backlinks of each confirmed
            # redirect (recursively, via self.pagebacklinks).
            genlist = {None: blgen}
            for redir in redirgen:
                if redir == page:
                    # if a wiki contains pages whose titles contain
                    # namespace aliases that existed before those aliases
                    # were defined (example: [[WP:Sandbox]] existed as a
                    # redirect to [[Wikipedia:Sandbox]] before the WP: alias
                    # was created) they can be returned as redirects to
                    # themselves; skip these
                    continue
                if redir.getRedirectTarget() == page:
                    genlist[redir.title()] = self.pagebacklinks(
                        redir, followRedirects=True,
                        filterRedirects=filterRedirects,
                        namespaces=namespaces,
                        content=content
                    )
            return itertools.chain(*list(genlist.values()))
        return blgen
@deprecated_args(step=None)
def page_embeddedin(self, page, filterRedirects=None, namespaces=None,
total=None, content=False):
"""Iterate all pages that embedded the given page as a template.
@param page: The Page to get inclusions for.
@param filterRedirects: If True, only return redirects that embed
the given page. If False, only return non-redirect links. If
None, return both (no filtering).
@param namespaces: If present, only return links from the namespaces
in this list.
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param content: if True, load the current content of each iterated page
(default False)
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
eiargs = {"geititle":
page.title(withSection=False).encode(self.encoding())}
if filterRedirects is not None:
eiargs["geifilterredir"] = (filterRedirects and "redirects" or
"nonredirects")
eigen = self._generator(api.PageGenerator, type_arg="embeddedin",
namespaces=namespaces, total=total,
g_content=content, **eiargs)
return eigen
    @deprecated_args(step=None)
    def pagereferences(self, page, followRedirects=False, filterRedirects=None,
                       withTemplateInclusion=True, onlyTemplateInclusion=False,
                       namespaces=None, total=None, content=False):
        """
        Convenience method combining pagebacklinks and page_embeddedin.

        @param page: the Page to get references for
        @param followRedirects: also return links to redirects pointing to
            the given page (passed to pagebacklinks)
        @param filterRedirects: If True, only return redirects. If False,
            only return non-redirects. If None, return both (no filtering).
        @param withTemplateInclusion: if True, also include pages embedding
            the given page as a template
        @param onlyTemplateInclusion: if True, return only pages embedding
            the given page as a template
        @param namespaces: If present, only return links from the namespaces
            in this list.
        @type namespaces: iterable of basestring or Namespace key,
            or a single instance of those types. May be a '|' separated
            list of namespace identifiers.
        @param total: Maximum number of pages to retrieve in total.
        @param content: if True, load the current content of each iterated
            page (default False)
        @raises KeyError: a namespace identifier was not resolved
        @raises TypeError: a namespace identifier has an inappropriate
            type such as NoneType or bool
        """
        if onlyTemplateInclusion:
            return self.page_embeddedin(page, namespaces=namespaces,
                                        filterRedirects=filterRedirects,
                                        total=total, content=content)
        if not withTemplateInclusion:
            return self.pagebacklinks(page, followRedirects=followRedirects,
                                      filterRedirects=filterRedirects,
                                      namespaces=namespaces,
                                      total=total, content=content)
        # Both kinds requested: chain the generators and let islice enforce
        # the overall 'total' limit across them.
        return itertools.islice(
            itertools.chain(
                self.pagebacklinks(
                    page, followRedirects, filterRedirects,
                    namespaces=namespaces, content=content),
                self.page_embeddedin(
                    page, filterRedirects, namespaces=namespaces,
                    content=content)
            ), total)
@deprecated_args(step=None)
def pagelinks(self, page, namespaces=None, follow_redirects=False,
total=None, content=False):
"""Iterate internal wikilinks contained (or transcluded) on page.
@param namespaces: Only iterate pages in these namespaces
(default: all)
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param follow_redirects: if True, yields the target of any redirects,
rather than the redirect page
@param content: if True, load the current content of each iterated page
(default False)
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
plargs = {}
if hasattr(page, "_pageid"):
plargs['pageids'] = str(page._pageid)
else:
pltitle = page.title(withSection=False).encode(self.encoding())
plargs['titles'] = pltitle
plgen = self._generator(api.PageGenerator, type_arg="links",
namespaces=namespaces, total=total,
g_content=content, redirects=follow_redirects,
**plargs)
return plgen
# Sortkey doesn't work with generator
@deprecated_args(withSortKey=None, step=None)
def pagecategories(self, page, total=None, content=False):
"""Iterate categories to which page belongs.
@param content: if True, load the current content of each iterated page
(default False); note that this means the contents of the
category description page, not the pages contained in the category
"""
clargs = {}
if hasattr(page, "_pageid"):
clargs['pageids'] = str(page._pageid)
else:
clargs['titles'] = page.title(
withSection=False).encode(self.encoding())
clgen = self._generator(api.PageGenerator,
type_arg='categories', total=total,
g_content=content, **clargs)
return clgen
@deprecated_args(step=None)
def pageimages(self, page, total=None, content=False):
"""Iterate images used (not just linked) on the page.
@param content: if True, load the current content of each iterated page
(default False); note that this means the content of the image
description page, not the image itself
"""
imtitle = page.title(withSection=False).encode(self.encoding())
imgen = self._generator(api.PageGenerator, type_arg="images",
titles=imtitle, total=total,
g_content=content)
return imgen
@deprecated_args(step=None)
def pagetemplates(self, page, namespaces=None, total=None, content=False):
"""Iterate templates transcluded (not just linked) on the page.
@param namespaces: Only iterate pages in these namespaces
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param content: if True, load the current content of each iterated page
(default False)
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
tltitle = page.title(withSection=False).encode(self.encoding())
tlgen = self._generator(api.PageGenerator, type_arg="templates",
titles=tltitle, namespaces=namespaces,
total=total, g_content=content)
return tlgen
@deprecated_args(step=None)
def categorymembers(self, category, namespaces=None, sortby=None,
                    reverse=False, starttime=None, endtime=None,
                    startsort=None, endsort=None, total=None,
                    content=False, member_type=None):
    """Iterate members of specified category.

    @param category: The Category to iterate.
    @param namespaces: If present, only return category members from
        these namespaces. To yield subcategories or files, use
        parameter member_type instead.
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @param sortby: determines the order in which results are generated,
        valid values are "sortkey" (default, results ordered by category
        sort key) or "timestamp" (results ordered by time page was
        added to the category)
    @type sortby: str
    @param reverse: if True, generate results in reverse order
        (default False)
    @param starttime: if provided, only generate pages added after this
        time; not valid unless sortby="timestamp"
    @type starttime: pywikibot.Timestamp
    @param endtime: if provided, only generate pages added before this
        time; not valid unless sortby="timestamp"
    @type endtime: pywikibot.Timestamp
    @param startsort: if provided, only generate pages >= this title
        lexically; not valid if sortby="timestamp"
    @type startsort: str
    @param endsort: if provided, only generate pages <= this title
        lexically; not valid if sortby="timestamp"
    @type endsort: str
    @param content: if True, load the current content of each iterated page
        (default False)
    @type content: bool
    @param member_type: member type; if member_type includes 'page' and is
        used in conjunction with sortby="timestamp", the API may limit
        results to only pages in the first 50 namespaces.
    @type member_type: str or iterable of str; values: page, subcat, file
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    # Category pages always live in namespace 14; anything else is a
    # caller error.
    if category.namespace() != 14:
        raise Error(
            u"categorymembers: non-Category page '%s' specified"
            % category.title())
    cmtitle = category.title(withSection=False).encode(self.encoding())
    cmargs = {'type_arg': "categorymembers", 'gcmtitle': cmtitle,
              'gcmprop': "ids|title|sortkey"}
    if sortby in ["sortkey", "timestamp"]:
        cmargs["gcmsort"] = sortby
    elif sortby:
        raise ValueError(
            "categorymembers: invalid sortby value '%s'"
            % sortby)
    # Validate range arguments before touching the API.
    if starttime and endtime and starttime > endtime:
        raise ValueError(
            "categorymembers: starttime must be before endtime")
    if startsort and endsort and startsort > endsort:
        raise ValueError(
            "categorymembers: startsort must be less than endsort")
    # Normalize a single member type string into a set for uniform
    # handling below.
    if isinstance(member_type, basestring):
        member_type = set([member_type])
    if (member_type and
            (sortby == 'timestamp' or
             MediaWikiVersion(self.version()) < MediaWikiVersion("1.12"))):
        # Retrofit cmtype/member_type, available on MW API 1.12+,
        # to use namespaces available on earlier versions.
        # Convert namespaces to a known type
        namespaces = set(self.namespaces.resolve(namespaces or []))
        if 'page' in member_type:
            # 'page' means "everything except the types not requested":
            # exclude the File (6) and Category (14) namespaces unless
            # those member types were asked for as well.
            excluded_namespaces = set()
            if 'file' not in member_type:
                excluded_namespaces.add(6)
            if 'subcat' not in member_type:
                excluded_namespaces.add(14)
            if namespaces:
                if excluded_namespaces.intersection(namespaces):
                    raise ValueError(
                        'incompatible namespaces %r and member_type %r'
                        % (namespaces, member_type))
                # All excluded namespaces are not present in `namespaces`.
            else:
                # If the number of namespaces is greater than permitted by
                # the API, it will issue a warning and use the namespaces
                # up until the limit, which will usually be sufficient.
                # TODO: QueryGenerator should detect when the number of
                # namespaces requested is higher than available, and split
                # the request into several batches.
                excluded_namespaces.update([-1, -2])
                namespaces = set(self.namespaces) - excluded_namespaces
        else:
            # Only subcategories and/or files were requested; these map
            # directly onto the Category and File namespaces.
            if 'file' in member_type:
                namespaces.add(6)
            if 'subcat' in member_type:
                namespaces.add(14)
        # The retrofit replaced member_type with a namespace filter.
        member_type = None
    if member_type:
        # Still set: the API supports cmtype natively on this wiki.
        cmargs['gcmtype'] = member_type
    if reverse:
        cmargs["gcmdir"] = "desc"
        # API wants start/end params in opposite order if using descending
        # sort; we take care of this reversal for the user
        (starttime, endtime) = (endtime, starttime)
        (startsort, endsort) = (endsort, startsort)
    # Time bounds require timestamp sort; sort-key bounds require the
    # default (sortkey) sort.
    if starttime and sortby == "timestamp":
        cmargs["gcmstart"] = starttime
    elif starttime:
        raise ValueError("categorymembers: "
                         "invalid combination of 'sortby' and 'starttime'")
    if endtime and sortby == "timestamp":
        cmargs["gcmend"] = endtime
    elif endtime:
        raise ValueError("categorymembers: "
                         "invalid combination of 'sortby' and 'endtime'")
    if startsort and sortby != "timestamp":
        cmargs["gcmstartsortkey"] = startsort
    elif startsort:
        raise ValueError("categorymembers: "
                         "invalid combination of 'sortby' and 'startsort'")
    if endsort and sortby != "timestamp":
        cmargs["gcmendsortkey"] = endsort
    elif endsort:
        raise ValueError("categorymembers: "
                         "invalid combination of 'sortby' and 'endsort'")
    cmgen = self._generator(api.PageGenerator, namespaces=namespaces,
                            total=total, g_content=content, **cmargs)
    return cmgen
def loadrevisions(self, page, getText=False, revids=None,
                  startid=None, endid=None, starttime=None,
                  endtime=None, rvdir=None, user=None, excludeuser=None,
                  section=None, sysop=False, step=None, total=None,
                  rollback=False):
    """Retrieve and store revision information.

    By default, retrieves the last (current) revision of the page,
    unless any of the optional parameters revids, startid, endid,
    starttime, endtime, rvdir, user, excludeuser, or limit are
    specified. Unless noted below, all parameters not specified
    default to False.

    If rvdir is False or not specified, startid must be greater than
    endid if both are specified; likewise, starttime must be greater
    than endtime. If rvdir is True, these relationships are reversed.

    @param page: retrieve revisions of this Page (required unless ids
        is specified)
    @param getText: if True, retrieve the wiki-text of each revision;
        otherwise, only retrieve the revision metadata (default)
    @param section: if specified, retrieve only this section of the text
        (getText must be True); section must be given by number (top of
        the article is section 0), not name
    @type section: int
    @param revids: retrieve only the specified revision ids (raise
        Exception if any of revids does not correspond to page
    @type revids: an int, a str or a list of ints or strings
    @param startid: retrieve revisions starting with this revid
    @param endid: stop upon retrieving this revid
    @param starttime: retrieve revisions starting at this Timestamp
    @param endtime: stop upon reaching this Timestamp
    @param rvdir: if false, retrieve newest revisions first (default);
        if true, retrieve earliest first
    @param user: retrieve only revisions authored by this user
    @param excludeuser: retrieve all revisions not authored by this user
    @param sysop: if True, switch to sysop account (if available) to
        retrieve this page
    """
    latest = (revids is None and
              startid is None and
              endid is None and
              starttime is None and
              endtime is None and
              rvdir is None and
              user is None and
              excludeuser is None and
              step is None and
              total is None)  # if True, retrieving current revision
    # check for invalid argument combinations
    if (startid is not None or endid is not None) and \
            (starttime is not None or endtime is not None):
        raise ValueError(
            "loadrevisions: startid/endid combined with starttime/endtime")
    if starttime is not None and endtime is not None:
        if rvdir and starttime >= endtime:
            raise ValueError(
                "loadrevisions: starttime > endtime with rvdir=True")
        if (not rvdir) and endtime >= starttime:
            raise ValueError(
                "loadrevisions: endtime > starttime with rvdir=False")
    if startid is not None and endid is not None:
        if rvdir and startid >= endid:
            raise ValueError(
                "loadrevisions: startid > endid with rvdir=True")
        if (not rvdir) and endid >= startid:
            raise ValueError(
                "loadrevisions: endid > startid with rvdir=False")
    rvargs = {'type_arg': 'info|revisions'}
    rvargs['rvprop'] = ['ids', 'timestamp', 'flags', 'comment', 'user']
    # Newer MediaWiki releases expose additional revision properties.
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.21'):
        rvargs['rvprop'].append('contentmodel')
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.19'):
        rvargs['rvprop'].append('sha1')
    if getText:
        rvargs['rvprop'].append('content')
        if section is not None:
            rvargs[u"rvsection"] = unicode(section)
    if rollback:
        self.login(sysop=sysop)
        rvargs[u"rvtoken"] = "rollback"
    if revids is None:
        # Query by page title rather than by explicit revision ids.
        rvtitle = page.title(withSection=False).encode(self.encoding())
        rvargs[u"titles"] = rvtitle
    else:
        if isinstance(revids, (int, basestring)):
            ids = unicode(revids)
        else:
            ids = u"|".join(unicode(r) for r in revids)
        rvargs[u"revids"] = ids
    if rvdir:
        rvargs[u"rvdir"] = u"newer"
    elif rvdir is not None:
        rvargs[u"rvdir"] = u"older"
    if startid:
        rvargs[u"rvstartid"] = startid
    if endid:
        rvargs[u"rvendid"] = endid
    if starttime:
        rvargs[u"rvstart"] = starttime
    if endtime:
        rvargs[u"rvend"] = endtime
    if user:
        rvargs[u"rvuser"] = user
    elif excludeuser:
        rvargs[u"rvexcludeuser"] = excludeuser
    # TODO if sysop: something
    # assemble API request
    rvgen = self._generator(api.PropertyGenerator, total=total, **rvargs)
    if step:
        # NOTE(review): this assignment overwrites the generator's
        # set_query_increment attribute instead of calling it as a
        # method — looks like a bug; confirm intended behavior.
        rvgen.set_query_increment = step
    if latest or "revids" in rvgen.request:
        rvgen.set_maximum_items(-1)  # suppress use of rvlimit parameter
    for pagedata in rvgen:
        if not self.sametitle(pagedata['title'],
                              page.title(withSection=False)):
            raise InconsistentTitleReceived(page, pagedata['title'])
        if "missing" in pagedata:
            raise NoPage(page)
        api.update_page(page, pagedata, rvgen.props)
# TODO: expand support to other parameters of action=parse?
def get_parsed_page(self, page):
    """Retrieve parsed text of the page using action=parse."""
    request = self._simple_request(action='parse', page=page)
    response = request.submit()
    assert 'parse' in response, "API parse response lacks 'parse' key"
    parse_data = response['parse']
    assert 'text' in parse_data, "API parse response lacks 'text' key"
    # The rendered HTML lives under the conventional '*' key.
    return parse_data['text']['*']
@deprecated_args(step=None)
def pagelanglinks(self, page, total=None, include_obsolete=False):
    """Iterate all interlanguage links on page, yielding Link objects.

    @param include_obsolete: if true, yield even Link objects whose
        site is obsolete
    """
    title = page.title(withSection=False)
    query = self._generator(api.PropertyGenerator,
                            type_arg="langlinks",
                            titles=title.encode(self.encoding()),
                            total=total)
    for pagedata in query:
        if not self.sametitle(pagedata['title'], title):
            raise InconsistentTitleReceived(page, pagedata['title'])
        for entry in pagedata.get('langlinks', ()):
            link = pywikibot.Link.langlinkUnsafe(entry['lang'],
                                                entry['*'],
                                                source=self)
            # Skip links to obsolete sites unless explicitly requested.
            if include_obsolete or not link.site.obsolete:
                yield link
@deprecated_args(step=None)
def page_extlinks(self, page, total=None):
    """Iterate all external links on page, yielding URL strings."""
    title = page.title(withSection=False)
    query = self._generator(api.PropertyGenerator, type_arg="extlinks",
                            titles=title.encode(self.encoding()),
                            total=total)
    for pagedata in query:
        if not self.sametitle(pagedata['title'], title):
            raise InconsistentTitleReceived(page, pagedata['title'])
        # Pages without external links simply lack the 'extlinks' key.
        for entry in pagedata.get('extlinks', ()):
            yield entry['*']
def getcategoryinfo(self, category):
    """Retrieve data on contents of category."""
    title = category.title(withSection=False)
    query = self._generator(api.PropertyGenerator,
                            type_arg="categoryinfo",
                            titles=title.encode(self.encoding()))
    # Store the fetched categoryinfo onto the category page object.
    self._update_page(category, query)
def categoryinfo(self, category):
    """Retrieve data on contents of category."""
    if not hasattr(category, "_catinfo"):
        self.getcategoryinfo(category)
        if not hasattr(category, "_catinfo"):
            # a category that exists but has no contents returns no API result
            category._catinfo = dict(size=0, pages=0, files=0, subcats=0)
    return category._catinfo
@deprecated_args(throttle=None, limit='total', step=None,
                 includeredirects='filterredir')
def allpages(self, start="!", prefix="", namespace=0, filterredir=None,
             filterlanglinks=None, minsize=None, maxsize=None,
             protect_type=None, protect_level=None, reverse=False,
             total=None, content=False):
    """Iterate pages in a single namespace.

    @param start: start at this title (the page need not exist)
    @param prefix: only yield pages whose titles begin with this string
    @param namespace: iterate pages from this (single) namespace
    @type namespace: int or Namespace.
    @param filterredir: if True, yield only redirects; if False (but
        not None), yield only non-redirects; default is to yield both
    @param filterlanglinks: if True, yield only pages with language
        links; if False (but not None), only pages without them;
        default is to yield both
    @param minsize: if given, only yield pages of at least this size
    @param maxsize: if given, only yield pages of at most this size
    @param protect_type: only yield pages protected with this type
    @type protect_type: str
    @param protect_level: only yield pages protected at this level;
        requires protect_type
    @param reverse: if True, iterate in reverse Unicode lexigraphic
        order instead of forward order
    @param content: if True, load the current content of each page
        (default False)
    @raises KeyError: the namespace identifier was not resolved
    @raises TypeError: the namespace identifier has an inappropriate
        type such as bool, or an iterable with more than one namespace
    """
    # Translate legacy filterredir values onto the boolean API.
    if filterredir not in (True, False, None):
        old = filterredir
        if not filterredir:
            filterredir = False
        elif filterredir == 'only':
            filterredir = True
        else:
            filterredir = None
        warn('The value "{0!r}" for "filterredir" is deprecated; use '
             '{1} instead.'.format(old, filterredir),
             DeprecationWarning, 3)
    gen = self._generator(api.PageGenerator, type_arg="allpages",
                          namespaces=namespace,
                          gapfrom=start, total=total,
                          g_content=content)
    req = gen.request
    if prefix:
        req["gapprefix"] = prefix
    if filterredir is not None:
        req['gapfilterredir'] = ('redirects' if filterredir
                                 else 'nonredirects')
    if filterlanglinks is not None:
        req['gapfilterlanglinks'] = ('withlanglinks' if filterlanglinks
                                     else 'withoutlanglinks')
    if isinstance(minsize, int):
        req["gapminsize"] = str(minsize)
    if isinstance(maxsize, int):
        req["gapmaxsize"] = str(maxsize)
    if isinstance(protect_type, basestring):
        req["gapprtype"] = protect_type
    if isinstance(protect_level, basestring):
        req["gapprlevel"] = protect_level
    if reverse:
        req["gapdir"] = "descending"
    return gen
@deprecated("Site.allpages()")
def prefixindex(self, prefix, namespace=0, includeredirects=True):
"""Yield all pages with a given prefix. Deprecated.
Use allpages() with the prefix= parameter instead of this method.
"""
if not includeredirects:
filterredir = False
elif includeredirects == 'only':
filterredir = True
else:
filterredir = None
return self.allpages(prefix=prefix, namespace=namespace,
filterredir=filterredir)
@deprecated_args(step=None)
def alllinks(self, start="!", prefix="", namespace=0, unique=False,
             fromids=False, total=None):
    """Iterate all links to pages (which need not exist) in one namespace.

    In practice, links found on pages that have since been deleted may
    still be present in the links table, so false positives are
    possible.

    @param start: start at this title (the page need not exist)
    @param prefix: only yield pages whose titles begin with this string
    @param namespace: iterate pages from this (single) namespace
    @type namespace: int or Namespace
    @param unique: if True, yield each link title only once; otherwise
        yield once per linking page
    @param fromids: if True, attach the linking page's pageid to each
        yielded Page as its '_fromid' attribute; incompatible with
        unique
    @raises KeyError: the namespace identifier was not resolved
    @raises TypeError: the namespace identifier has an inappropriate
        type such as bool, or an iterable with more than one namespace
    """
    if unique and fromids:
        raise Error("alllinks: unique and fromids cannot both be True.")
    gen = self._generator(api.ListGenerator, type_arg="alllinks",
                          namespaces=namespace, alfrom=start,
                          total=total, alunique=unique)
    if prefix:
        gen.request["alprefix"] = prefix
    if fromids:
        gen.request["alprop"] = "title|ids"
    for linkdata in gen:
        page = pywikibot.Page(self, linkdata['title'], linkdata['ns'])
        if fromids:
            page._fromid = linkdata['fromid']
        yield page
@deprecated_args(step=None)
def allcategories(self, start='!', prefix='', total=None,
                  reverse=False, content=False):
    """Iterate categories used (which need not have a Category page).

    Yields Category objects. In practice, links found on pages that
    have since been deleted may still be present in the database table,
    so false positives are possible.

    @param start: start at this category title (it need not exist)
    @param prefix: only yield categories starting with this string
    @param reverse: if True, iterate in reverse Unicode lexigraphic
        order instead of forward order
    @param content: if True, load the current content of each iterated
        page (default False); this is the category description page,
        not the member pages
    """
    gen = self._generator(api.PageGenerator,
                          type_arg="allcategories", gacfrom=start,
                          total=total, g_content=content)
    if prefix:
        gen.request["gacprefix"] = prefix
    if reverse:
        gen.request["gacdir"] = "descending"
    return gen
@deprecated("Site.allcategories()")
def categories(self, number=10, repeat=False):
"""DEPRECATED."""
if repeat:
limit = None
else:
limit = number
return self.allcategories(total=limit)
def isBot(self, username):
    """Return True if username is a bot user.

    @param username: the username to check
    @type username: basestring
    @rtype: bool
    """
    # any() short-circuits on the first match instead of materializing
    # the complete list of bot names first.
    return any(userdata['name'] == username
               for userdata in self.botusers())
@deprecated_args(step=None)
def botusers(self, total=None):
    """Iterate bot users.

    Each iterated value is a dict with 'name', 'userid', 'editcount',
    'registration', and 'groups' keys. 'groups' is present only when
    the user belongs to at least one group, and is then a list of
    unicodes; every other value is a unicode and should always be
    present.
    """
    # Lazily populate and cache the bot list on first use.
    if not getattr(self, "_bots", None):
        self._bots = {}
        for user_data in self.allusers(group='bot', total=total):
            self._bots.setdefault(user_data['name'], user_data)
    for cached in self._bots.values():
        yield cached
@deprecated_args(step=None)
def allusers(self, start='!', prefix='', group=None, total=None):
    """Iterate registered users, ordered by username.

    Each iterated value is a dict with 'name', 'editcount',
    'registration', and (sometimes) 'groups' keys. 'groups' is present
    only when the user belongs to at least one group, and is then a
    list of unicodes; every other value is a unicode and should always
    be present.

    @param start: start at this username (the name need not exist)
    @param prefix: only iterate usernames starting with this substring
    @param group: only iterate users that are members of this group
    @type group: str
    """
    gen = self._generator(api.ListGenerator, type_arg="allusers",
                          auprop="editcount|groups|registration",
                          aufrom=start, total=total)
    if prefix:
        gen.request["auprefix"] = prefix
    if group:
        gen.request["augroup"] = group
    return gen
@deprecated_args(step=None)
def allimages(self, start="!", prefix="", minsize=None, maxsize=None,
              reverse=False, sha1=None, sha1base36=None,
              total=None, content=False):
    """Iterate all images, ordered by image title.

    Yields FilePages; the pages need not exist on the wiki.

    @param start: start at this title (the name need not exist)
    @param prefix: only iterate titles starting with this substring
    @param minsize: only iterate images of at least this many bytes
    @param maxsize: only iterate images of no more than this many bytes
    @param reverse: if True, iterate in reverse lexigraphic order
    @param sha1: only iterate the image(s) with this sha1 hash (more
        than one match is theoretically possible)
    @param sha1base36: same as sha1 but encoded in base 36
    @param content: if True, load the current content of each iterated
        page (default False); this is the image description page, not
        the image itself
    """
    gen = self._generator(api.PageGenerator,
                          type_arg="allimages", gaifrom=start,
                          total=total, g_content=content)
    req = gen.request
    if prefix:
        req["gaiprefix"] = prefix
    if isinstance(minsize, int):
        req["gaiminsize"] = str(minsize)
    if isinstance(maxsize, int):
        req["gaimaxsize"] = str(maxsize)
    if reverse:
        req["gaidir"] = "descending"
    if sha1:
        req["gaisha1"] = sha1
    if sha1base36:
        req["gaisha1base36"] = sha1base36
    return gen
@deprecated_args(step=None)
def blocks(self, starttime=None, endtime=None, reverse=False,
           blockids=None, users=None, iprange=None, total=None):
    """Iterate all current blocks, in order of creation.

    The iterator yields dicts whose keys correspond to the block
    properties.

    @see: U{https://www.mediawiki.org/wiki/API:Blocks}
    @note: logevents only logs user blocks, while this method
        iterates all blocks including IP ranges.
    @note: the C{userid} key is only available on mw 1.18+.
    @note: the C{iprange} parameter cannot be combined with C{users}.
    @param starttime: start iterating at this Timestamp
    @type starttime: pywikibot.Timestamp
    @param endtime: stop iterating at this Timestamp
    @type endtime: pywikibot.Timestamp
    @param reverse: if True, iterate oldest blocks first (default: newest)
    @type reverse: bool
    @param blockids: only iterate blocks with these id numbers. Numbers
        must be separated by '|' if given as a basestring.
    @type blockids: basestring, tuple or list
    @param users: only iterate blocks affecting these usernames or IPs
    @type users: basestring, tuple or list
    @param iprange: a single IP or an IP range. Ranges broader than
        IPv4/16 or IPv6/19 are not accepted.
    @type iprange: str
    @param total: total amount of block entries
    @type total: int
    """
    if starttime and endtime:
        if reverse and starttime > endtime:
            raise Error(
                "blocks: "
                "starttime must be before endtime with reverse=True")
        if not reverse and endtime > starttime:
            raise Error(
                "blocks: "
                "endtime must be before starttime with reverse=False")
    gen = self._generator(api.ListGenerator, type_arg="blocks",
                          total=total)
    req = gen.request
    req['bkprop'] = ['id', 'user', 'by', 'timestamp', 'expiry',
                     'reason', 'range', 'flags']
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.18'):
        req['bkprop'] += ['userid']
    if starttime:
        req["bkstart"] = starttime
    if endtime:
        req["bkend"] = endtime
    if reverse:
        req["bkdir"] = "newer"
    if blockids:
        req["bkids"] = blockids
    if users:
        if isinstance(users, basestring):
            users = users.split('|')
        # Uppercase IP usernames; names containing '::' belong to
        # registered users rather than anonymous IPv6 addresses, so
        # they are left untouched.
        users = [name.upper() if is_IP(name) and '::' not in name
                 else name
                 for name in users]
        req["bkusers"] = users
    elif iprange:
        req['bkip'] = iprange
    return gen
@deprecated_args(step=None)
def exturlusage(self, url=None, protocol="http", namespaces=None,
                total=None, content=False):
    """Iterate Pages that contain links to the given URL.

    @param url: the URL to search for (without the protocol prefix);
        may contain a '*' wildcard, but only at the start of the
        hostname
    @param protocol: the protocol prefix (default: "http")
    """
    return self._generator(api.PageGenerator, type_arg="exturlusage",
                           geuquery=url, geuprotocol=protocol,
                           namespaces=namespaces,
                           total=total, g_content=content)
@deprecated_args(step=None)
def imageusage(self, image, namespaces=None, filterredir=None,
               total=None, content=False):
    """Iterate Pages that contain links to the given FilePage.

    @param image: the image to search for (the FilePage need not exist
        on the wiki)
    @type image: FilePage
    @param namespaces: if present, only iterate pages in these namespaces
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @param filterredir: if True, yield only redirects; if False (but
        not None), yield only non-redirects; default is to yield both
    @param content: if True, load the current content of each iterated
        page (default False)
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    extra_args = {'giutitle': image.title(withSection=False)}
    if filterredir is not None:
        extra_args['giufilterredir'] = ('redirects' if filterredir
                                        else 'nonredirects')
    return self._generator(api.PageGenerator, type_arg="imageusage",
                           namespaces=namespaces,
                           total=total, g_content=content, **extra_args)
@deprecated_args(step=None)
def logevents(self, logtype=None, user=None, page=None, namespace=None,
              start=None, end=None, reverse=False, tag=None, total=None):
    """Iterate all log entries.

    @note: logevents with logtype='block' only logs user blocks whereas
        site.blocks iterates all blocks including IP ranges.
    @param logtype: only iterate entries of this type (see wiki
        documentation for available types, which will include "block",
        "protect", "rights", "delete", "upload", "move", "import",
        "patrol", "merge")
    @type logtype: basestring
    @param user: only iterate entries that match this user name
    @type user: basestring
    @param page: only iterate entries affecting this page
    @type page: Page or basestring
    @param namespace: namespace to retrieve logevents from
    @type namespace: int or Namespace
    @param start: only iterate entries from and after this Timestamp
    @type start: Timestamp or ISO date string
    @param end: only iterate entries up to and through this Timestamp
    @type end: Timestamp or ISO date string
    @param reverse: if True, iterate oldest entries first (default: newest)
    @type reverse: bool
    @param tag: only iterate entries tagged with this tag
    @type tag: basestring
    @param total: maximum number of events to iterate
    @type total: int
    @rtype: iterable
    @raises KeyError: the namespace identifier was not resolved
    @raises TypeError: the namespace identifier has an inappropriate
        type such as bool, or an iterable with more than one namespace
    """
    if start and end:
        self.assert_valid_iter_params('logevents', start, end, reverse)
    gen = self._generator(api.LogEntryListGenerator, type_arg=logtype,
                          total=total)
    req = gen.request
    # Pass through every filter argument that was supplied.
    for key, value in (("letype", logtype), ("leuser", user),
                       ("letitle", page), ("lestart", start),
                       ("leend", end)):
        if value is not None:
            req[key] = value
    if reverse:
        req["ledir"] = "newer"
    if namespace:
        req["lenamespace"] = namespace
    if tag:
        # Supported in version 1.16+; earlier sites will cause APIError
        req['letag'] = tag
    return gen
@deprecated('APISite.logevents()')
@deprecated_args(repeat=None)
def logpages(self, number=50, mode=None, title=None, user=None,
             namespace=None, start=None, end=None, tag=None, newer=False,
             dump=False, offset=None):
    """
    Iterate log pages. DEPRECATED.

    When dump is enabled, the raw API dict is returned.

    @param number: maximum number of log entries to iterate
    @param mode: only iterate entries of this log type
    @param title: only iterate entries affecting this page
    @param user: only iterate entries made by this user
    @param namespace: namespace(s) to retrieve log events from;
        defaults to all namespaces
    @param start: only iterate entries from and after this Timestamp;
        may not be combined with offset
    @param end: only iterate entries up to and through this Timestamp
    @param tag: only iterate entries tagged with this tag
    @param newer: if True, iterate oldest entries first
    @param dump: if True, yield the raw API dict for each entry
    @param offset: start iterating this many hours before now
    @type offset: int
    @rtype: tuple of Page, str, int, str
    """
    # Bug fix: the default used to be the mutable literal namespace=[],
    # a shared mutable default argument. A None sentinel is normalized
    # here to keep the downstream call identical.
    if namespace is None:
        namespace = []
    if offset:
        # offset and start are mutually exclusive alternatives.
        assert not start
        assert isinstance(offset, int)
        offset = datetime.timedelta(hours=offset)
        start = pywikibot.Timestamp.utcnow() - offset
    gen = self.logevents(logtype=mode, page=title, tag=tag,
                         user=user, namespace=namespace,
                         start=start, end=end, reverse=newer,
                         total=number)
    for entry in gen:
        if dump:
            yield entry.data
        else:
            yield (entry.page(),
                   entry.user(),
                   int(entry.timestamp().totimestampformat()),
                   entry.comment())
@deprecated_args(returndict=None, nobots=None, rcshow=None, rcprop=None,
                 rctype='changetype', revision=None, repeat=None,
                 rcstart='start', rcend='end', rcdir=None, step=None,
                 includeredirects='showRedirects', namespace='namespaces',
                 rcnamespace='namespaces', number='total', rclimit='total')
def recentchanges(self, start=None, end=None, reverse=False,
                  namespaces=None, pagelist=None, changetype=None,
                  showMinor=None, showBot=None, showAnon=None,
                  showRedirects=None, showPatrolled=None, topOnly=False,
                  total=None, user=None, excludeuser=None, tag=None):
    """Iterate recent changes.

    @param start: Timestamp to start listing from
    @type start: pywikibot.Timestamp
    @param end: Timestamp to end listing at
    @type end: pywikibot.Timestamp
    @param reverse: if True, start with oldest changes (default: newest)
    @type reverse: bool
    @param namespaces: only iterate pages in these namespaces
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @param pagelist: iterate changes to pages in this list only
    @param pagelist: list of Pages
    @param changetype: only iterate changes of this type ("edit" for
        edits to existing pages, "new" for new pages, "log" for log
        entries)
    @type changetype: basestring
    @param showMinor: if True, only list minor edits; if False, only
        non-minor edits; if None, list both
    @type showMinor: bool or None
    @param showBot: if True, only list bot edits; if False, only
        non-bot edits; if None, list both
    @type showBot: bool or None
    @param showAnon: if True, only list anon edits; if False, only
        non-anon edits; if None, list both
    @type showAnon: bool or None
    @param showRedirects: if True, only list edits to redirect pages;
        if False, only edits to non-redirect pages; if None, list both
    @type showRedirects: bool or None
    @param showPatrolled: if True, only list patrolled edits; if False,
        only non-patrolled edits; if None, list both
    @type showPatrolled: bool or None
    @param topOnly: if True, only list changes that are the latest
        revision (default False)
    @type topOnly: bool
    @param user: if not None, only list edits by this user or users
    @type user: basestring|list
    @param excludeuser: if not None, exclude edits by this user or users
    @type excludeuser: basestring|list
    @param tag: a recent changes tag
    @type tag: str
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    if start and end:
        self.assert_valid_iter_params('recentchanges', start, end, reverse)
    gen = self._generator(
        api.ListGenerator, type_arg="recentchanges",
        rcprop="user|comment|timestamp|title|ids"
               "|sizes|redirect|loginfo|flags|tags",
        namespaces=namespaces,
        total=total, rctoponly=topOnly)
    req = gen.request
    if start is not None:
        req["rcstart"] = start
    if end is not None:
        req["rcend"] = end
    if reverse:
        req["rcdir"] = "newer"
    if pagelist:
        if MediaWikiVersion(self.version()) > MediaWikiVersion("1.14"):
            pywikibot.warning(
                u"recentchanges: pagelist option is disabled; ignoring.")
        else:
            req["rctitles"] = (p.title(withSection=False)
                               for p in pagelist)
    if changetype:
        req["rctype"] = changetype
    filters = {'minor': showMinor,
               'bot': showBot,
               'anon': showAnon,
               'redirect': showRedirects,
               }
    # The 'patrolled' flag is only visible to users with patrol rights.
    if showPatrolled is not None and (
            self.has_right('patrol') or self.has_right('patrolmarks')):
        req['rcprop'] += ['patrolled']
        filters['patrolled'] = showPatrolled
    req['rcshow'] = api.OptionSet(self, 'recentchanges', 'show',
                                  filters)
    if user:
        req['rcuser'] = user
    if excludeuser:
        req['rcexcludeuser'] = excludeuser
    req['rctag'] = tag
    return gen
@deprecated_args(number='total', step=None, key='searchstring',
                 getredirects='get_redirects')
def search(self, searchstring, namespaces=None, where="text",
           get_redirects=False, total=None, content=False):
    """Iterate Pages that contain the searchstring.

    Note that this may include non-existing Pages if the wiki's database
    table contains outdated entries.

    @param searchstring: the text to search for
    @type searchstring: unicode
    @param where: Where to search; value must be "text", "title" or
        "nearmatch" (many wikis do not support title or nearmatch search)
    @param namespaces: search only in these namespaces (defaults to all)
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @param get_redirects: if True, include redirects in results. Since
        version MediaWiki 1.23 it will always return redirects.
    @param content: if True, load the current content of each iterated page
        (default False)
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    # 'nearmatch' search only exists on MW 1.17+.
    where_types = ['text', 'title', 'titles']
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.17'):
        where_types.append('nearmatch')
    if not searchstring:
        raise Error("search: searchstring cannot be empty")
    if where not in where_types:
        raise Error("search: unrecognized 'where' value: %s" % where)
    if where in ('title', 'titles'):
        if isinstance(self.family, WikimediaFamily):
            # 'title' search was disabled, use intitle instead
            searchstring = 'intitle:' + searchstring
            issue_deprecation_warning(
                "where='{0}'".format(where),
                "searchstring='{0}'".format(searchstring), 2)
            where = None  # default
        else:
            if where == 'titles':
                issue_deprecation_warning("where='titles'",
                                          "where='title'", 2)
            # Old MediaWiki releases only understand the 'titles' form.
            if MediaWikiVersion(self.version()) < MediaWikiVersion('1.11'):
                where = 'titles'
            else:
                where = 'title'
    # Default to all non-negative (content-capable) namespaces.
    if not namespaces and namespaces != 0:
        namespaces = [ns_id for ns_id in self.namespaces if ns_id >= 0]
    srgen = self._generator(api.PageGenerator, type_arg="search",
                            gsrsearch=searchstring, gsrwhat=where,
                            namespaces=namespaces,
                            total=total, g_content=content)
    # MW 1.23+ always includes redirects; the flag only exists before.
    if MediaWikiVersion(self.version()) < MediaWikiVersion('1.23'):
        srgen.request['gsrredirects'] = get_redirects
    return srgen
@deprecated_args(step=None)
def usercontribs(self, user=None, userprefix=None, start=None, end=None,
                 reverse=False, namespaces=None, showMinor=None,
                 total=None, top_only=False):
    """Iterate contributions by a particular user.

    Iterated values have the same format as recentchanges.

    @param user: iterate contributions by this user (name or IP)
    @param userprefix: iterate contributions by all users whose names
        or IPs start with this substring
    @param start: iterate contributions starting at this Timestamp
    @param end: iterate contributions ending at this Timestamp
    @param reverse: iterate oldest contributions first (default: newest)
    @param namespaces: only iterate pages in these namespaces
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @param showMinor: if True, iterate only minor edits; if False and
        not None, only non-minor edits (default: iterate both)
    @param top_only: if True, iterate only edits which are the latest
        revision
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    if not (user or userprefix):
        raise Error(
            "usercontribs: either user or userprefix must be non-empty")
    if start and end:
        self.assert_valid_iter_params('usercontribs', start, end, reverse)
    gen = self._generator(api.ListGenerator, type_arg="usercontribs",
                          ucprop="ids|title|timestamp|comment|flags",
                          namespaces=namespaces,
                          total=total, uctoponly=top_only)
    req = gen.request
    if user:
        req["ucuser"] = user
    if userprefix:
        req["ucuserprefix"] = userprefix
    if start is not None:
        req["ucstart"] = str(start)
    if end is not None:
        req["ucend"] = str(end)
    if reverse:
        req["ucdir"] = "newer"
    show_filter = api.OptionSet(self, 'usercontribs', 'show')
    show_filter['minor'] = showMinor
    req['ucshow'] = show_filter
    return gen
@deprecated_args(step=None)
def watchlist_revs(self, start=None, end=None, reverse=False,
namespaces=None, showMinor=None, showBot=None,
showAnon=None, total=None):
"""Iterate revisions to pages on the bot user's watchlist.
Iterated values will be in same format as recentchanges.
@param start: Iterate revisions starting at this Timestamp
@param end: Iterate revisions ending at this Timestamp
@param reverse: Iterate oldest revisions first (default: newest)
@param namespaces: only iterate pages in these namespaces
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param showMinor: if True, only list minor edits; if False (and not
None), only list non-minor edits
@param showBot: if True, only list bot edits; if False (and not
None), only list non-bot edits
@param showAnon: if True, only list anon edits; if False (and not
None), only list non-anon edits
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
if start and end:
self.assert_valid_iter_params(
'watchlist_revs', start, end, reverse)
wlgen = self._generator(
api.ListGenerator, type_arg='watchlist',
wlprop='user|comment|timestamp|title|ids|flags',
wlallrev='', namespaces=namespaces, total=total)
# TODO: allow users to ask for "patrol" as well?
if start is not None:
wlgen.request["wlstart"] = start
if end is not None:
wlgen.request["wlend"] = end
if reverse:
wlgen.request["wldir"] = "newer"
filters = {'minor': showMinor,
'bot': showBot,
'anon': showAnon}
wlgen.request['wlshow'] = api.OptionSet(self, 'watchlist', 'show',
filters)
return wlgen
# TODO: T75370
@deprecated_args(step=None)
def deletedrevs(self, page, start=None, end=None, reverse=None,
get_text=False, total=None):
"""Iterate deleted revisions.
Each value returned by the iterator will be a dict containing the
'title' and 'ns' keys for a particular Page and a 'revisions' key
whose value is a list of revisions in the same format as
recentchanges (plus a 'content' element if requested). If get_text
is true, the toplevel dict will contain a 'token' key as well.
@param page: The page to check for deleted revisions
@param start: Iterate revisions starting at this Timestamp
@param end: Iterate revisions ending at this Timestamp
@param reverse: Iterate oldest revisions first (default: newest)
@param get_text: If True, retrieve the content of each revision and
an undelete token
"""
if start and end:
self.assert_valid_iter_params('deletedrevs', start, end, reverse)
if not self.logged_in():
self.login()
if "deletedhistory" not in self.userinfo['rights']:
try:
self.login(True)
except NoUsername:
pass
if "deletedhistory" not in self.userinfo['rights']:
raise Error(
"deletedrevs: "
"User:%s not authorized to access deleted revisions."
% self.user())
if get_text:
if "undelete" not in self.userinfo['rights']:
try:
self.login(True)
except NoUsername:
pass
if "undelete" not in self.userinfo['rights']:
raise Error(
"deletedrevs: "
"User:%s not authorized to view deleted content."
% self.user())
drgen = self._generator(api.ListGenerator, type_arg="deletedrevs",
titles=page.title(withSection=False),
drprop="revid|user|comment|minor",
total=total)
if get_text:
drgen.request['drprop'] = (drgen.request['drprop'] +
['content', 'token'])
if start is not None:
drgen.request["drstart"] = start
if end is not None:
drgen.request["drend"] = end
if reverse:
drgen.request["drdir"] = "newer"
return drgen
def users(self, usernames):
"""Iterate info about a list of users by name or IP.
@param usernames: a list of user names
@type usernames: list, or other iterable, of unicodes
"""
if not isinstance(usernames, basestring):
usernames = u"|".join(usernames)
usgen = api.ListGenerator(
"users", ususers=usernames, site=self,
usprop="blockinfo|groups|editcount|registration|emailable"
)
return usgen
@deprecated("Site.randompages()")
def randompage(self, redirect=False):
"""
DEPRECATED.
@param redirect: Return a random redirect page
@rtype: pywikibot.Page
"""
return self.randompages(total=1, redirects=redirect)
@deprecated("Site.randompages()")
def randomredirectpage(self):
"""
DEPRECATED: Use Site.randompages() instead.
@return: Return a random redirect page
"""
return self.randompages(total=1, redirects=True)
@deprecated_args(step=None)
def randompages(self, total=None, namespaces=None,
redirects=False, content=False):
"""Iterate a number of random pages.
Pages are listed in a fixed sequence, only the starting point is
random.
@param total: the maximum number of pages to iterate
@param namespaces: only iterate pages in these namespaces.
@type namespaces: iterable of basestring or Namespace key,
or a single instance of those types. May be a '|' separated
list of namespace identifiers.
@param redirects: if True, include only redirect pages in results
(default: include only non-redirects)
@param content: if True, load the current content of each iterated page
(default False)
@raises KeyError: a namespace identifier was not resolved
@raises TypeError: a namespace identifier has an inappropriate
type such as NoneType or bool
"""
rngen = self._generator(api.PageGenerator, type_arg="random",
namespaces=namespaces, total=total,
g_content=content, grnredirect=redirects)
return rngen
# Catalog of editpage error codes, for use in generating messages.
# The block at the bottom are page related errors.
_ep_errors = {
"noapiwrite": "API editing not enabled on %(site)s wiki",
'writeapidenied':
'User %(user)s is not authorized to edit on %(site)s wiki',
'cantcreate':
'User %(user)s not authorized to create new pages on %(site)s '
'wiki',
"cantcreate-anon":
'Bot is not logged in, and anon users are not authorized to '
'create new pages on %(site)s wiki',
"noimageredirect-anon":
'Bot is not logged in, and anon users are not authorized to '
'create image redirects on %(site)s wiki',
'noimageredirect': 'User %(user)s not authorized to create image '
'redirects on %(site)s wiki',
"filtered": "%(info)s",
"contenttoobig": "%(info)s",
'noedit-anon': 'Bot is not logged in, and anon users are not '
'authorized to edit on %(site)s wiki',
'noedit':
'User %(user)s not authorized to edit pages on %(site)s wiki',
"missingtitle": NoCreateError,
"editconflict": EditConflict,
"articleexists": PageCreatedConflict,
"pagedeleted": PageDeletedConflict,
"protectedpage": LockedPage,
"protectedtitle": LockedNoPage,
"cascadeprotected": CascadeLockedPage,
}
_ep_text_overrides = set(['appendtext', 'prependtext', 'undo'])
    @must_be(group='user')
    def editpage(self, page, summary=None, minor=True, notminor=False,
                 bot=True, recreate=True, createonly=False, nocreate=False,
                 watch=None, **kwargs):
        """Submit an edit to be saved to the wiki.

        @param page: The Page to be saved.
            By default its .text property will be used
            as the new text to be saved to the wiki
        @param summary: the edit summary
        @param minor: if True (default), mark edit as minor
        @param notminor: if True, override account preferences to mark edit
            as non-minor
        @param recreate: if True (default), create new page even if this
            title has previously been deleted
        @param createonly: if True, raise an error if this title already
            exists on the wiki
        @param nocreate: if True, raise an error if the page does not exist
        @param watch: Specify how the watchlist is affected by this edit, set
            to one of "watch", "unwatch", "preferences", "nochange":
            * watch: add the page to the watchlist
            * unwatch: remove the page from the watchlist
            The following settings are supported by mw >= 1.16 only
            * preferences: use the preference settings (default)
            * nochange: don't change the watchlist
        @param bot: if True, mark edit with bot flag
        @kwarg text: Overrides Page.text
        @type text: unicode
        @kwarg section: Edit an existing numbered section or
            a new section ('new')
        @type section: int or str
        @kwarg prependtext: Prepend text. Overrides Page.text
        @type text: unicode
        @kwarg appendtext: Append text. Overrides Page.text.
        @type text: unicode
        @kwarg undo: Revision id to undo. Overrides Page.text
        @type undo: int
        @return: True if edit succeeded, False if it failed
        @rtype: bool
        @raises Error: No text to be saved
        @raises NoPage: recreate is disabled and page does not exist
        """
        # basetimestamp doubles as a flag: it starts as a bool (should a
        # base timestamp be sent for conflict detection?) and is later
        # replaced by the latest revision's actual timestamp, or False.
        basetimestamp = True
        text_overrides = self._ep_text_overrides.intersection(kwargs.keys())
        if text_overrides:
            if 'text' in kwargs:
                raise ValueError('text can not be used with any of %s'
                                 % ', '.join(text_overrides))
            if len(text_overrides) > 1:
                raise ValueError('Multiple text overrides used: %s'
                                 % ', '.join(text_overrides))
            text = None
            basetimestamp = False
        elif 'text' in kwargs:
            text = kwargs.pop('text')
            # A brand-new section cannot conflict with an old revision.
            if 'section' in kwargs and kwargs['section'] == 'new':
                basetimestamp = False
        elif 'section' in kwargs:
            raise ValueError('text must be used with section')
        else:
            text = page.text
            if text is None:
                raise Error("editpage: no text to be saved")
        if basetimestamp or not recreate:
            try:
                lastrev = page.latest_revision
                basetimestamp = lastrev.timestamp
            except NoPage:
                basetimestamp = False
                if not recreate:
                    raise
        token = self.tokens['edit']
        if bot is None:
            # Fall back to whether the account actually has the bot right.
            bot = ("bot" in self.userinfo["rights"])
        params = dict(action='edit', title=page,
                      text=text, token=token, summary=summary, bot=bot,
                      recreate=recreate, createonly=createonly,
                      nocreate=nocreate, minor=minor,
                      notminor=not minor and notminor,
                      **kwargs)
        if basetimestamp and 'basetimestamp' not in kwargs:
            params['basetimestamp'] = basetimestamp
        # Translate the watch parameter; older MediaWiki (< 1.16) only
        # understands boolean 'watch'/'unwatch' flags.
        watch_items = set(["watch", "unwatch", "preferences", "nochange"])
        if watch in watch_items:
            if MediaWikiVersion(self.version()) < MediaWikiVersion("1.16"):
                if watch in ['preferences', 'nochange']:
                    pywikibot.warning(u'The watch value {0} is not supported '
                                      'by {1}'.format(watch, self))
                else:
                    params[watch] = True
            else:
                params['watchlist'] = watch
        elif watch:
            pywikibot.warning(
                u"editpage: Invalid watch value '%(watch)s' ignored."
                % {'watch': watch})
        req = self._simple_request(**params)
        self.lock_page(page)
        try:
            # The loop re-submits the same request after a CAPTCHA answer
            # has been filled in; every other path returns or raises.
            while True:
                try:
                    result = req.submit()
                    pywikibot.debug(u"editpage response: %s" % result,
                                    _logger)
                except api.APIError as err:
                    if err.code.endswith("anon") and self.logged_in():
                        pywikibot.debug(
                            "editpage: received '%s' even though bot is "
                            "logged in" % err.code,
                            _logger)
                    if err.code in self._ep_errors:
                        if isinstance(self._ep_errors[err.code], basestring):
                            errdata = {
                                'site': self,
                                'title': page.title(withSection=False),
                                'user': self.user(),
                                'info': err.info
                            }
                            raise Error(self._ep_errors[err.code] % errdata)
                        else:
                            # Value is an exception class; raise it directly.
                            raise self._ep_errors[err.code](page)
                    pywikibot.debug(
                        u"editpage: Unexpected error code '%s' received."
                        % err.code,
                        _logger)
                    raise
                assert "edit" in result and "result" in result["edit"], result
                if result["edit"]["result"] == "Success":
                    if "nochange" in result["edit"]:
                        # null edit, page not changed
                        pywikibot.log(u"Page [[%s]] saved without any changes."
                                      % page.title())
                        return True
                    page.latest_revision_id = result["edit"]["newrevid"]
                    # See:
                    # https://www.mediawiki.org/wiki/API:Wikimania_2006_API_discussion#Notes
                    # not safe to assume that saved text is the same as sent
                    del page.text
                    return True
                elif result["edit"]["result"] == "Failure":
                    if "captcha" in result["edit"]:
                        captcha = result["edit"]["captcha"]
                        req['captchaid'] = captcha['id']
                        if captcha["type"] == "math":
                            # TODO: Should the input be parsed through eval
                            # in py3?
                            req['captchaword'] = input(captcha["question"])
                            continue
                        elif "url" in captcha:
                            import webbrowser
                            webbrowser.open('%s://%s%s'
                                            % (self.protocol(),
                                               self.hostname(),
                                               captcha["url"]))
                            req['captchaword'] = pywikibot.input(
                                "Please view CAPTCHA in your browser, "
                                "then type answer here:")
                            continue
                        else:
                            pywikibot.error(
                                u"editpage: unknown CAPTCHA response %s, "
                                u"page not saved"
                                % captcha)
                            return False
                    elif 'spamblacklist' in result['edit']:
                        raise SpamfilterError(page,
                                              result['edit']['spamblacklist'])
                    elif 'code' in result['edit'] and 'info' in result['edit']:
                        pywikibot.error(
                            u"editpage: %s\n%s, "
                            % (result['edit']['code'], result['edit']['info']))
                        return False
                    else:
                        pywikibot.error(u"editpage: unknown failure reason %s"
                                        % str(result))
                        return False
                else:
                    pywikibot.error(
                        u"editpage: Unknown result code '%s' received; "
                        u"page not saved" % result["edit"]["result"])
                    pywikibot.log(str(result))
                    return False
        finally:
            self.unlock_page(page)
OnErrorExc = namedtuple('OnErrorExc', 'exception on_new_page')
# catalog of merge history errors for use in error messages
_mh_errors = {
'noapiwrite': 'API editing not enabled on {site} wiki',
'writeapidenied':
'User {user} is not authorized to edit on {site} wiki',
'mergehistory-fail-invalid-source': 'Source {source} is invalid '
'(this may be caused by an invalid page ID in the database)',
'mergehistory-fail-invalid-dest': 'Destination {dest} is invalid '
'(this may be caused by an invalid page ID in the database)',
'mergehistory-fail-no-change':
'History merge did not merge any revisions; '
'please recheck the page and timestamp parameters',
'mergehistory-fail-permission':
'User {user} has insufficient permissions to merge history',
'mergehistory-fail-timestamps-overlap':
'Source revisions from {source} overlap or come after '
'destination revisions of {dest}'
}
    @must_be(group='sysop', right='mergehistory')
    def merge_history(self, source, dest, timestamp=None, reason=None):
        """Merge revisions from one page into another.

        Revisions dating up to the given timestamp in the source will be
        moved into the destination page history. History merge fails if
        the timestamps of source and dest revisions overlap (all source
        revisions must be dated before the earliest dest revision).

        @param source: Source page from which revisions will be merged
        @type source: pywikibot.Page
        @param dest: Destination page to which revisions will be merged
        @type dest: pywikibot.Page
        @param timestamp: Revisions from this page dating up to this timestamp
            will be merged into the destination page (if not given or False,
            all revisions will be merged)
        @type timestamp: pywikibot.Timestamp
        @param reason: Optional reason for the history merge
        @type reason: str
        @raises FatalServerError: the wiki does not support action=mergehistory
        @raises NoPage: source or dest page does not exist
        @raises PageSaveRelatedError: source and dest are the same page
        @raises Error: an API error listed in _mh_errors occurred
        """
        # Check wiki version to see if action=mergehistory is supported
        min_version = MediaWikiVersion('1.27.0-wmf.13')
        if MediaWikiVersion(self.version()) < min_version:
            raise FatalServerError(str(self) + ' version must be '
                                   '1.27.0-wmf.13 or newer to support the '
                                   'history merge API.')
        # Data for error messages
        errdata = {
            'site': self,
            'source': source,
            'dest': dest,
            'user': self.user(),
        }
        # Check if pages exist before continuing
        if not source.exists():
            raise NoPage(source,
                         'Cannot merge revisions from source {source} because '
                         'it does not exist on {site}'
                         .format(**errdata))
        if not dest.exists():
            raise NoPage(dest,
                         'Cannot merge revisions to destination {dest} '
                         'because it does not exist on {site}'
                         .format(**errdata))
        if source == dest:  # Same pages
            raise PageSaveRelatedError(
                'Cannot merge revisions of {source} to itself'
                .format(**errdata))
        # Send the merge API request
        token = self.tokens['csrf']
        req = self._simple_request(action='mergehistory',
                                   token=token)
        req['from'] = source  # 'from' is a python keyword
        req['to'] = dest
        if reason:
            req['reason'] = reason
        if timestamp:
            req['timestamp'] = timestamp
        # Lock both pages while the merge is in flight.
        self.lock_page(source)
        self.lock_page(dest)
        try:
            result = req.submit()
            pywikibot.debug('mergehistory response: {result}'
                            .format(result=result),
                            _logger)
        except api.APIError as err:
            if err.code in self._mh_errors:
                on_error = self._mh_errors[err.code]
                raise Error(on_error.format(**errdata))
            else:
                pywikibot.debug(
                    "mergehistory: Unexpected error code '{code}' received"
                    .format(code=err.code),
                    _logger
                )
                raise
        finally:
            self.unlock_page(source)
            self.unlock_page(dest)
        # A successful response always contains a 'mergehistory' element.
        if 'mergehistory' not in result:
            pywikibot.error('mergehistory: {error}'.format(error=result))
            raise Error('mergehistory: unexpected response')
# catalog of move errors for use in error messages
_mv_errors = {
"noapiwrite": "API editing not enabled on %(site)s wiki",
"writeapidenied":
"User %(user)s is not authorized to edit on %(site)s wiki",
"nosuppress":
'User %(user)s is not authorized to move pages without '
'creating redirects',
"cantmove-anon":
'Bot is not logged in, and anon users are not authorized to '
'move pages on %(site)s wiki',
"cantmove":
"User %(user)s is not authorized to move pages on %(site)s wiki",
"immobilenamespace":
'Pages in %(oldnamespace)s namespace cannot be moved on %(site)s '
'wiki',
'articleexists': OnErrorExc(exception=ArticleExistsConflict,
on_new_page=True),
# "protectedpage" can happen in both directions.
"protectedpage": OnErrorExc(exception=LockedPage, on_new_page=None),
"protectedtitle": OnErrorExc(exception=LockedNoPage, on_new_page=True),
"nonfilenamespace":
'Cannot move a file to %(newnamespace)s namespace on %(site)s '
'wiki',
"filetypemismatch":
'[[%(newtitle)s]] file extension does not match content of '
'[[%(oldtitle)s]]',
}
    @must_be(group='user')
    def movepage(self, page, newtitle, summary, movetalk=True,
                 noredirect=False):
        """Move a Page to a new title.

        @param page: the Page to be moved (must exist)
        @param newtitle: the new title for the Page
        @type newtitle: unicode
        @param summary: edit summary (required!)
        @param movetalk: if True (default), also move the talk page if possible
        @param noredirect: if True, suppress creation of a redirect from the
            old title to the new one
        @raises Error: the page cannot be moved to its own title, or an API
            error listed in _mv_errors occurred
        @raises NoPage: the page to be moved does not exist
        @return: Page object with the new title
        @rtype: pywikibot.Page
        """
        oldtitle = page.title(withSection=False)
        newlink = pywikibot.Link(newtitle, self)
        newpage = pywikibot.Page(newlink)
        # Normalize the target title, including its namespace prefix.
        if newlink.namespace:
            newtitle = self.namespace(newlink.namespace) + ":" + newlink.title
        else:
            newtitle = newlink.title
        if oldtitle == newtitle:
            raise Error("Cannot move page %s to its own title."
                        % oldtitle)
        if not page.exists():
            raise NoPage(page,
                         "Cannot move page %(page)s because it "
                         "does not exist on %(site)s.")
        token = self.tokens['move']
        self.lock_page(page)
        req = self._simple_request(action='move',
                                   noredirect=noredirect,
                                   reason=summary,
                                   movetalk=movetalk,
                                   token=token,
                                   to=newtitle)
        req['from'] = oldtitle  # "from" is a python keyword
        try:
            result = req.submit()
            pywikibot.debug(u"movepage response: %s" % result,
                            _logger)
        except api.APIError as err:
            if err.code.endswith("anon") and self.logged_in():
                pywikibot.debug(
                    u"movepage: received '%s' even though bot is logged in"
                    % err.code,
                    _logger)
            if err.code in self._mv_errors:
                on_error = self._mv_errors[err.code]
                if hasattr(on_error, 'exception'):
                    # LockedPage can be raised both if "from" or "to" page
                    # are locked for the user.
                    # Both pages locked is not considered
                    # (a double failure has low probability)
                    if issubclass(on_error.exception, LockedPage):
                        # we assume "from" is locked unless proven otherwise
                        failed_page = page
                        if newpage.exists():
                            # NOTE(review): prot[0] appears to be the group
                            # required by the restriction — verify.
                            for prot in self.page_restrictions(
                                    newpage).values():
                                if prot[0] not in self._userinfo['groups']:
                                    failed_page = newpage
                                    break
                    else:
                        failed_page = newpage if on_error.on_new_page else page
                    raise on_error.exception(failed_page)
                else:
                    errdata = {
                        'site': self,
                        'oldtitle': oldtitle,
                        'oldnamespace': self.namespace(page.namespace()),
                        'newtitle': newtitle,
                        'newnamespace': self.namespace(newlink.namespace),
                        'user': self.user(),
                    }
                    raise Error(on_error % errdata)
            pywikibot.debug(u"movepage: Unexpected error code '%s' received."
                            % err.code,
                            _logger)
            raise
        finally:
            self.unlock_page(page)
        if "move" not in result:
            pywikibot.error(u"movepage: %s" % result)
            raise Error("movepage: unexpected response")
        # TODO: Check for talkmove-error messages
        if "talkmove-error-code" in result["move"]:
            pywikibot.warning(
                u"movepage: Talk page %s not moved"
                % (page.toggleTalkPage().title(asLink=True)))
        return pywikibot.Page(page, newtitle)
# catalog of rollback errors for use in error messages
_rb_errors = {
"noapiwrite": "API editing not enabled on %(site)s wiki",
"writeapidenied": "User %(user)s not allowed to edit through the API",
'alreadyrolled':
'Page [[%(title)s]] already rolled back; action aborted.',
} # other errors shouldn't arise because we check for those errors
    @must_be('user')
    def rollbackpage(self, page, **kwargs):
        """Roll back page to version before last user's edits.

        The keyword arguments are those supported by the rollback API.

        As a precaution against errors, this method will fail unless
        the page history contains at least two revisions, and at least
        one that is not by the same user who made the last edit.

        @param page: the Page to be rolled back (must exist)
        @raises Error: the page's loaded history does not permit a rollback
        """
        if len(page._revisions) < 2:
            raise Error(
                u"Rollback of %s aborted; load revision history first."
                % page.title(asLink=True))
        last_rev = page.latest_revision
        last_user = last_rev.user
        for rev in sorted(page._revisions.values(), reverse=True,
                          key=lambda r: r.timestamp):
            # start with most recent revision first
            if rev.user != last_user:
                break
        else:
            # for/else: the loop completed without a break, i.e. every
            # loaded revision is by the same user — nothing to roll back to.
            raise Error(
                u"Rollback of %s aborted; only one user in revision history."
                % page.title(asLink=True))
        parameters = merge_unique_dicts(kwargs, action='rollback',
                                        title=page,
                                        token=self.tokens['rollback'],
                                        user=last_user)
        self.lock_page(page)
        req = self._simple_request(**parameters)
        try:
            req.submit()
        except api.APIError as err:
            errdata = {
                'site': self,
                'title': page.title(withSection=False),
                'user': self.user(),
            }
            if err.code in self._rb_errors:
                raise Error(self._rb_errors[err.code] % errdata)
            pywikibot.debug(u"rollback: Unexpected error code '%s' received."
                            % err.code,
                            _logger)
            raise
        finally:
            self.unlock_page(page)
# catalog of delete errors for use in error messages
_dl_errors = {
"noapiwrite": "API editing not enabled on %(site)s wiki",
"writeapidenied": "User %(user)s not allowed to edit through the API",
"permissiondenied": "User %(user)s not authorized to (un)delete "
"pages on %(site)s wiki.",
'cantdelete':
'Could not delete [[%(title)s]]. Maybe it was deleted already.',
"cantundelete": "Could not undelete [[%(title)s]]. "
"Revision may not exist or was already undeleted."
} # other errors shouldn't occur because of pre-submission checks
@must_be(group='sysop')
@deprecate_arg("summary", "reason")
def deletepage(self, page, reason):
"""Delete page from the wiki. Requires appropriate privilege level.
@param page: Page to be deleted.
@type page: Page
@param reason: Deletion reason.
@type reason: basestring
"""
token = self.tokens['delete']
self.lock_page(page)
req = self._simple_request(action='delete',
token=token,
title=page,
reason=reason)
try:
req.submit()
except api.APIError as err:
errdata = {
'site': self,
'title': page.title(withSection=False),
'user': self.user(),
}
if err.code in self._dl_errors:
raise Error(self._dl_errors[err.code] % errdata)
pywikibot.debug(u"delete: Unexpected error code '%s' received."
% err.code,
_logger)
raise
else:
page.clear_cache()
finally:
self.unlock_page(page)
@must_be(group='sysop')
@deprecate_arg("summary", "reason")
def undelete_page(self, page, reason, revisions=None):
"""Undelete page from the wiki. Requires appropriate privilege level.
@param page: Page to be deleted.
@type page: Page
@param revisions: List of timestamps to restore.
If None, restores all revisions.
@type revisions: list
@param reason: Undeletion reason.
@type reason: basestring
"""
token = self.tokens['delete']
self.lock_page(page)
req = self._simple_request(action='undelete',
title=page,
reason=reason,
token=token,
timestamps=revisions)
try:
req.submit()
except api.APIError as err:
errdata = {
'site': self,
'title': page.title(withSection=False),
'user': self.user(),
}
if err.code in self._dl_errors:
raise Error(self._dl_errors[err.code] % errdata)
pywikibot.debug(u"delete: Unexpected error code '%s' received."
% err.code,
_logger)
raise
finally:
self.unlock_page(page)
_protect_errors = {
"noapiwrite": "API editing not enabled on %(site)s wiki",
"writeapidenied": "User %(user)s not allowed to edit through the API",
'permissiondenied':
'User %(user)s not authorized to protect pages on %(site)s wiki.',
"cantedit":
"User %(user)s can't protect this page because user %(user)s "
"can't edit it.",
"protect-invalidlevel": "Invalid protection level"
}
def protection_types(self):
"""
Return the protection types available on this site.
@return: protection types available
@rtype: set of unicode instances
@see: L{Siteinfo._get_default()}
"""
return set(self.siteinfo.get('restrictions')['types'])
def protection_levels(self):
"""
Return the protection levels available on this site.
@return: protection types available
@rtype: set of unicode instances
@see: L{Siteinfo._get_default()}
"""
# implemented in b73b5883d486db0e9278ef16733551f28d9e096d
return set(self.siteinfo.get('restrictions')['levels'])
    @must_be(group='sysop')
    @deprecate_arg("summary", "reason")
    def protect(self, page, protections, reason, expiry=None, **kwargs):
        """(Un)protect a wiki page. Requires administrator status.

        @param page: the Page to be (un)protected
        @param protections: A dict mapping type of protection to protection
            level of that type. Valid types of protection are 'edit', 'move',
            'create', and 'upload'. Valid protection levels (in MediaWiki 1.12)
            are '' (equivalent to 'none'), 'autoconfirmed', and 'sysop'.
            If None is given, however, that protection will be skipped.
        @type protections: dict
        @param reason: Reason for the action
        @type reason: basestring
        @param expiry: When the block should expire. This expiry will be
            applied to all protections. If None, 'infinite', 'indefinite',
            'never', or '' is given, there is no expiry.
        @type expiry: pywikibot.Timestamp, string in GNU timestamp format
            (including ISO 8601).
        """
        token = self.tokens['protect']
        self.lock_page(page)
        # Build 'type=level' pairs, skipping protections set to None.
        protectList = [ptype + '=' + level
                       for ptype, level in protections.items()
                       if level is not None]
        parameters = merge_unique_dicts(kwargs, action='protect', title=page,
                                        token=token,
                                        protections=protectList, reason=reason,
                                        expiry=expiry)
        req = self._simple_request(**parameters)
        try:
            result = req.submit()
        except api.APIError as err:
            errdata = {
                'site': self,
                'user': self.user(),
            }
            if err.code in self._protect_errors:
                raise Error(self._protect_errors[err.code] % errdata)
            pywikibot.debug(u"protect: Unexpected error code '%s' received."
                            % err.code,
                            _logger)
            raise
        else:
            # Cache the now-current protection state on the page.
            # NOTE: 'expiry' is deliberately rebound here; each returned
            # protection entry carries its own expiry value.
            protection = {}
            for d in result['protect']['protections']:
                expiry = d.pop('expiry')
                ptype, level = d.popitem()
                if level:
                    protection[ptype] = (level, expiry)
            page._protection = protection
        finally:
            self.unlock_page(page)
# TODO: implement undelete
_patrol_errors = {
"nosuchrcid": "There is no change with rcid %(rcid)s",
"nosuchrevid": "There is no change with revid %(revid)s",
"patroldisabled": "Patrolling is disabled on %(site)s wiki",
"noautopatrol": 'User %(user)s has no permission to patrol its own '
'changes, "autopatrol" is needed',
'notpatrollable':
"The revision %(revid)s can't be patrolled as it's too old."
}
@must_be(group='user')
@deprecated_args(token=None)
def patrol(self, rcid=None, revid=None, revision=None):
"""Return a generator of patrolled pages.
Pages to be patrolled are identified by rcid, revid or revision.
At least one of the parameters is mandatory.
See https://www.mediawiki.org/wiki/API:Patrol.
@param rcid: an int/string/iterable/iterator providing rcid of pages
to be patrolled.
@type rcid: iterable/iterator which returns a number or string which
contains only digits; it also supports a string (as above) or int
@param revid: an int/string/iterable/iterator providing revid of pages
to be patrolled.
@type revid: iterable/iterator which returns a number or string which
contains only digits; it also supports a string (as above) or int.
@param revision: an Revision/iterable/iterator providing Revision
object of pages to be patrolled.
@type revision: iterable/iterator which returns a Revision object; it
also supports a single Revision.
@rtype: iterator of dict with 'rcid', 'ns' and 'title'
of the patrolled page.
"""
# If patrol is not enabled, attr will be set the first time a
# request is done.
if hasattr(self, u'_patroldisabled'):
if self._patroldisabled:
return
if all(_ is None for _ in [rcid, revid, revision]):
raise Error('No rcid, revid or revision provided.')
if isinstance(rcid, int) or isinstance(rcid, basestring):
rcid = set([rcid])
if isinstance(revid, int) or isinstance(revid, basestring):
revid = set([revid])
if isinstance(revision, pywikibot.page.Revision):
revision = set([revision])
# Handle param=None.
rcid = rcid or set()
revid = revid or set()
revision = revision or set()
# TODO: remove exeception for mw < 1.22
if (revid or revision) and MediaWikiVersion(
self.version()) < MediaWikiVersion("1.22"):
raise NotImplementedError(
u'Support of "revid" parameter\n'
u'is not implemented in MediaWiki version < "1.22"')
else:
combined_revid = set(revid) | set(r.revid for r in revision)
gen = itertools.chain(
zip_longest(rcid, [], fillvalue='rcid'),
zip_longest(combined_revid, [], fillvalue='revid'))
token = self.tokens['patrol']
for idvalue, idtype in gen:
req = self._request(parameters={'action': 'patrol',
'token': token,
idtype: idvalue})
try:
result = req.submit()
except api.APIError as err:
# patrol is disabled, store in attr to avoid other requests
if err.code == u'patroldisabled':
self._patroldisabled = True
return
errdata = {
'site': self,
'user': self.user(),
}
errdata[idtype] = idvalue
if err.code in self._patrol_errors:
raise Error(self._patrol_errors[err.code] % errdata)
pywikibot.debug("protect: Unexpected error code '%s' received."
% err.code,
_logger)
raise
yield result['patrol']
@need_version('1.12')
@must_be(group='sysop')
def blockuser(self, user, expiry, reason, anononly=True, nocreate=True,
              autoblock=True, noemail=False, reblock=False):
    """
    Block a user for certain amount of time and for a certain reason.

    @param user: The username/IP to be blocked without a namespace.
    @type user: L{pywikibot.User}
    @param expiry: The length or date/time when the block expires. If
        'never', 'infinite', 'indefinite' it never does. If the value is
        given as a basestring it's parsed by php's strtotime function:
        U{http://php.net/manual/en/function.strtotime.php}
        The relative format is described there:
        U{http://php.net/manual/en/datetime.formats.relative.php}
        It is recommended to not use a basestring if possible to be
        independent of the API.
    @type expiry: Timestamp/datetime (absolute),
        basestring (relative/absolute) or False ('never')
    @param reason: The reason for the block.
    @type reason: basestring
    @param anononly: Disable anonymous edits for this IP.
    @type anononly: boolean
    @param nocreate: Prevent account creation.
    @type nocreate: boolean
    @param autoblock: Automatically block the last used IP address and all
        subsequent IP addresses from which this account logs in.
    @type autoblock: boolean
    @param noemail: Prevent user from sending email through the wiki.
    @type noemail: boolean
    @param reblock: If the user is already blocked, overwrite the existing
        block.
    @type reblock: boolean
    @return: The data retrieved from the API request.
    @rtype: dict
    """
    # The API expects the literal string 'never' for an indefinite block.
    if expiry is False:
        expiry = 'never'
    req = self._simple_request(
        action='block', user=user.username, expiry=expiry, reason=reason,
        token=self.tokens['block'], anononly=anononly, nocreate=nocreate,
        autoblock=autoblock, noemail=noemail, reblock=reblock)
    return req.submit()
@need_version('1.12')
@must_be(group='sysop')
def unblockuser(self, user, reason=None):
    """
    Remove the block for the user.

    @param user: The username/IP without a namespace.
    @type user: L{pywikibot.User}
    @param reason: Reason for the unblock.
    @type reason: basestring
    @return: The data retrieved from the API request.
    @rtype: dict
    """
    return self._simple_request(action='unblock', user=user.username,
                                token=self.tokens['block'],
                                reason=reason).submit()
@must_be(group='user')
def watch(self, pages, unwatch=False):
    """Add or remove pages from watchlist.

    @param pages: A single page or a sequence of pages.
    @type pages: A page object, a page-title string, or sequence of them.
        Also accepts a single pipe-separated string like 'title1|title2'.
    @param unwatch: If True, remove pages from watchlist;
        if False add them (default).
    @return: True if API returned expected response; False otherwise
    @rtype: bool
    """
    parameters = {'action': 'watch',
                  'token': self.tokens['watch'],
                  'unwatch': unwatch}
    # The response marks each title 'watched' or 'unwatched' on success.
    expected = 'unwatched' if unwatch else 'watched'
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.23'):
        # Modern API accepts all titles in a single request.
        parameters['titles'] = pages
        response = self._simple_request(**parameters).submit()
        return all(expected in entry for entry in response['watch'])
    # MW version < 1.23: one request per title.
    if isinstance(pages, str):
        pages = pages.split('|') if '|' in pages else (pages,)
    for title in pages:
        parameters['title'] = title
        response = self._simple_request(**parameters).submit()
        if expected not in response['watch']:
            return False
    return True
@must_be(group='user')
@deprecated('Site().watch')
def watchpage(self, page, unwatch=False):
    """
    Add or remove page from watchlist.

    DEPRECATED: Use Site().watch() instead.

    @param page: A single page.
    @type page: A page object, a page-title string.
    @param unwatch: If True, remove page from watchlist;
        if False (default), add it.
    @return: True if API returned expected response; False otherwise
    @rtype: bool
    """
    response = self._simple_request(action='watch', title=page,
                                    token=self.tokens['watch'],
                                    unwatch=unwatch).submit()
    if 'watch' not in response:
        pywikibot.error('watchpage: Unexpected API response:\n%s'
                        % response)
        return False
    expected = 'unwatched' if unwatch else 'watched'
    return expected in response['watch']
@must_be(group='user')
def purgepages(self, pages, **kwargs):
    """
    Purge the server's cache for one or multiple pages.

    @param pages: list of Page objects
    @return: True if API returned expected response; False otherwise
    @rtype: bool
    """
    req = self._simple_request(action='purge', titles=list(set(pages)))
    linkupdate_args = ['forcelinkupdate', 'forcerecursivelinkupdate']
    # Forward only the API parameters this action understands.
    for key, value in kwargs.items():
        if key in linkupdate_args + ['redirects', 'converttitles']:
            req[key] = value
    response = req.submit()
    if 'purge' not in response:
        pywikibot.error(
            'purgepages: Unexpected API response:\n%s' % response)
        return False
    page_results = response['purge']
    checks = ['purged' in entry for entry in page_results]
    if any(kwargs.get(arg) for arg in linkupdate_args):
        checks += ['linkupdate' in entry for entry in page_results]
    return all(checks)
@deprecated("Site().exturlusage")
def linksearch(self, siteurl, limit=None, euprotocol=None):
    """Backwards-compatible interface to exturlusage()."""
    # Pure delegation: maps the legacy 'limit' parameter to 'total'.
    return self.exturlusage(siteurl, total=limit, protocol=euprotocol)
def _get_titles_with_hash(self, hash_found=None):
    """Helper for the deprecated method get(Files|Images)FromAnHash."""
    # This should be removed with together with get(Files|Images)FromHash
    if hash_found is None:
        # This makes absolutely NO sense.
        pywikibot.warning(
            'The "hash_found" parameter in "getFilesFromAnHash" and '
            '"getImagesFromAnHash" are not optional.')
        return
    return [img.title(withNamespace=False)
            for img in self.allimages(sha1=hash_found)]
@deprecated('Site().allimages')
def getFilesFromAnHash(self, hash_found=None):
    """
    Return all files that have the same hash.

    DEPRECATED: Use L{APISite.allimages} instead using 'sha1'.
    """
    # Thin deprecated alias; delegates to the shared helper.
    return self._get_titles_with_hash(hash_found)
@deprecated('Site().allimages')
def getImagesFromAnHash(self, hash_found=None):
    """
    Return all images that have the same hash.

    DEPRECATED: Use L{APISite.allimages} instead using 'sha1'.
    """
    # Thin deprecated alias; delegates to the shared helper.
    return self._get_titles_with_hash(hash_found)
@must_be(group='user')
def is_uploaddisabled(self):
    """Return True if upload is disabled on site.

    When the version is at least 1.27wmf9, uses general siteinfo.
    If not called directly, it is cached by the first attempted
    upload action.
    """
    if MediaWikiVersion(self.version()) >= MediaWikiVersion('1.27wmf9'):
        return not self._siteinfo.get('general')['uploadsenabled']
    if hasattr(self, '_uploaddisabled'):
        return self._uploaddisabled
    # attempt a fake upload; on enabled sites will fail for:
    # missingparam: One of the parameters
    #    filekey, file, url, statuskey is required
    # TODO: is there another way?
    try:
        probe = self._request(throttle=False,
                              parameters={'action': 'upload',
                                          'token': self.tokens['edit']})
        probe.submit()
    except api.APIError as error:
        if error.code == u'uploaddisabled':
            self._uploaddisabled = True
        elif error.code == u'missingparam':
            # If the upload module is enabled, the above dummy request
            # does not have sufficient parameters and will cause a
            # 'missingparam' error.
            self._uploaddisabled = False
        else:
            # Unexpected error
            raise
    return self._uploaddisabled
def stash_info(self, file_key, props=False):
    """Get the stash info for a given file key."""
    # Any falsy props value (e.g. an empty list) is sent as False.
    data = self._simple_request(action='query', prop='stashimageinfo',
                                siifilekey=file_key,
                                siiprop=props or False).submit()
    return data['query']['stashimageinfo'][0]
@deprecate_arg('imagepage', 'filepage')
def upload(self, filepage, source_filename=None, source_url=None,
           comment=None, text=None, watch=False, ignore_warnings=False,
           chunk_size=0, _file_key=None, _offset=0, _verify_stash=None,
           report_success=None):
    """
    Upload a file to the wiki.

    Either source_filename or source_url, but not both, must be provided.

    @param filepage: a FilePage object from which the wiki-name of the
        file will be obtained.
    @param source_filename: path to the file to be uploaded
    @param source_url: URL of the file to be uploaded
    @param comment: Edit summary; if this is not provided, then
        filepage.text will be used. An empty summary is not permitted.
        This may also serve as the initial page text (see below).
    @param text: Initial page text; if this is not set, then
        filepage.text will be used, or comment.
    @param watch: If true, add filepage to the bot user's watchlist
    @param ignore_warnings: It may be a static boolean, a callable
        returning a boolean or an iterable. The callable gets a list of
        UploadWarning instances and the iterable should contain the warning
        codes for which an equivalent callable would return True if all
        UploadWarning codes are in that list. If the result is False it'll
        not continue uploading the file and otherwise disable any warning
        and reattempt to upload the file. NOTE: If report_success is True
        or None it'll raise an UploadWarning exception if the static
        boolean is False.
    @type ignore_warnings: bool or callable or iterable of str
    @param chunk_size: The chunk size in bytes for chunked uploading (see
        U{https://www.mediawiki.org/wiki/API:Upload#Chunked_uploading}). It
        will only upload in chunks, if the version number is 1.20 or higher
        and the chunk size is positive but lower than the file size.
    @type chunk_size: int
    @param _file_key: Reuses an already uploaded file using the filekey. If
        None (default) it will upload the file.
    @type _file_key: str or None
    @param _offset: When file_key is not None this can be an integer to
        continue a previously canceled chunked upload. If False it treats
        that as a finished upload. If True it requests the stash info from
        the server to determine the offset. By default starts at 0.
    @type _offset: int or bool
    @param _verify_stash: Requests the SHA1 and file size uploaded and
        compares it to the local file. Also verifies that _offset is
        matching the file size if the _offset is an int. If _offset is
        False it verifies that the file size match with the local file. If
        None it'll verify the stash when a file key and offset is given.
    @type _verify_stash: bool or None
    @param report_success: If the upload was successful it'll print a
        success message and if ignore_warnings is set to False it'll
        raise an UploadWarning if a warning occurred. If it's None
        (default) it'll be True if ignore_warnings is a bool and False
        otherwise. If it's True or None ignore_warnings must be a bool.
    @return: It returns True if the upload was successful and False
        otherwise.
    @rtype: bool
    """
    def create_warnings_list(response):
        # Wrap each API warning code/message pair in an UploadWarning
        # carrying the file key and offset needed to resume the upload.
        return [
            api.UploadWarning(
                warning,
                upload_warnings.get(warning, '%(msg)s') % {'msg': data},
                _file_key, response['offset'])
            for warning, data in response['warnings'].items()]

    upload_warnings = {
        # map API warning codes to user error messages
        # %(msg)s will be replaced by message string from API response
        'duplicate-archive':
            'The file is a duplicate of a deleted file %(msg)s.',
        'was-deleted': "The file %(msg)s was previously deleted.",
        'emptyfile': "File %(msg)s is empty.",
        'exists': "File %(msg)s already exists.",
        'duplicate': "Uploaded file is a duplicate of %(msg)s.",
        'badfilename': "Target filename is invalid.",
        'filetype-unwanted-type': "File %(msg)s type is unwanted type.",
        'exists-normalized': 'File exists with different extension as '
                             '"%(msg)s".',
        'bad-prefix': 'Target filename has a bad prefix %(msg)s.',
        'page-exists':
            'Target filename exists but with a different file %(msg)s.',
        # API-returned message string will be timestamps, not much use here
        'nochange': 'The upload is an exact duplicate of the current '
                    'version of this file.',
        'duplicateversions': 'The upload is an exact duplicate of older '
                             'version(s) of this file.',
    }

    # An offset != 0 doesn't make sense without a file key
    assert(_offset == 0 or _file_key is not None)
    # check for required user right
    if "upload" not in self.userinfo["rights"]:
        raise Error(
            "User '%s' does not have upload rights on site %s."
            % (self.user(), self))
    # check for required parameters
    if bool(source_filename) == bool(source_url):
        raise ValueError("APISite.upload: must provide either "
                         "source_filename or source_url, not both.")
    if comment is None:
        comment = filepage.text
    if not comment:
        raise ValueError("APISite.upload: cannot upload file without "
                         "a summary/description.")
    if report_success is None:
        report_success = isinstance(ignore_warnings, bool)
    if report_success is True:
        if not isinstance(ignore_warnings, bool):
            raise ValueError('report_success may only be set to True when '
                             'ignore_warnings is a boolean')
        issue_deprecation_warning('"ignore_warnings" as a boolean and '
                                  '"report_success" is True or None',
                                  '"report_success=False" or define '
                                  '"ignore_warnings" as callable/iterable',
                                  3)
    # Normalize an iterable of warning codes into an equivalent callable.
    if isinstance(ignore_warnings, Iterable):
        ignored_warnings = ignore_warnings
        ignore_warnings = lambda warnings: all(  # flake8: disable=E731
            w.code in ignored_warnings for w in warnings)
    # True only when ignore_warnings is the static boolean True.
    ignore_all_warnings = not callable(ignore_warnings) and ignore_warnings
    if text is None:
        text = filepage.text
    if not text:
        text = comment
    token = self.tokens['edit']
    result = None
    file_page_title = filepage.title(withNamespace=False)
    file_size = None
    offset = _offset
    # make sure file actually exists
    if source_filename:
        if os.path.isfile(source_filename):
            file_size = os.path.getsize(source_filename)
        elif offset is not False:
            raise ValueError("File '%s' does not exist."
                             % source_filename)
    if source_filename and _file_key:
        assert offset is False or file_size is not None
        if _verify_stash is None:
            _verify_stash = True
        if (offset is not False and offset is not True and
                offset > file_size):
            raise ValueError(
                'For the file key "{0}" the offset was set to {1} '
                'while the file is only {2} bytes large.'.format(
                    _file_key, offset, file_size))
    # Verify the already-stashed data (size/SHA1) against the local file
    # and/or ask the server where to resume.
    if _verify_stash or offset is True:
        if not _file_key:
            raise ValueError('Without a file key it cannot request the '
                             'stash information')
        if not source_filename:
            raise ValueError('Can request stash information only when '
                             'using a file name.')
        props = ['size']
        if _verify_stash:
            props += ['sha1']
        stash_info = self.stash_info(_file_key, props)
        if offset is True:
            offset = stash_info['size']
        elif offset is False:
            if file_size != stash_info['size']:
                raise ValueError(
                    'For the file key "{0}" the server reported a size '
                    '{1} while the file size is {2}'
                    .format(_file_key, stash_info['size'], file_size))
        elif offset is not False and offset != stash_info['size']:
            raise ValueError(
                'For the file key "{0}" the server reported a size {1} '
                'while the offset was {2}'.format(
                    _file_key, stash_info['size'], offset))
        if _verify_stash:
            # The SHA1 was also requested so calculate and compare it
            assert 'sha1' in stash_info, \
                'sha1 not in stash info: {0}'.format(stash_info)
            sha1 = compute_file_hash(source_filename, bytes_to_read=offset)
            if sha1 != stash_info['sha1']:
                raise ValueError(
                    'The SHA1 of {0} bytes of the stashed "{1}" is {2} '
                    'while the local file is {3}'.format(
                        offset, _file_key, stash_info['sha1'], sha1))
    assert offset is not True
    if _file_key and file_size is None:
        assert offset is False
    if _file_key and offset is False or offset == file_size:
        # Everything is already stashed; only the publish step remains.
        pywikibot.log('Reused already upload file using '
                      'filekey "{0}"'.format(_file_key))
        # TODO: Use sessionkey instead of filekey if necessary
        final_request = self._simple_request(action='upload', token=token,
                                             filename=file_page_title,
                                             comment=comment, text=text,
                                             filekey=_file_key)
    elif source_filename:
        # TODO: Dummy value to allow also Unicode names, see bug T75661
        mime_filename = 'FAKE-NAME'
        # upload local file
        throttle = True
        filesize = os.path.getsize(source_filename)
        chunked_upload = (chunk_size > 0 and chunk_size < filesize and
                          MediaWikiVersion(
                              self.version()) >= MediaWikiVersion('1.20'))
        with open(source_filename, 'rb') as f:
            final_request = self._request(
                throttle=throttle, parameters={
                    'action': 'upload', 'token': token, 'text': text,
                    'filename': file_page_title, 'comment': comment})
            if chunked_upload:
                # Send the file piecewise to the stash, then publish it
                # with the resulting filekey via final_request below.
                if offset > 0:
                    pywikibot.log('Continuing upload from byte '
                                  '{0}'.format(offset))
                while True:
                    f.seek(offset)
                    chunk = f.read(chunk_size)
                    req = self._request(
                        throttle=throttle, mime=True,
                        parameters={
                            'action': 'upload',
                            'token': token,
                            'stash': True,
                            'filesize': filesize,
                            'offset': offset,
                            'filename': file_page_title,
                            'ignorewarnings': ignore_all_warnings})
                    req.mime_params['chunk'] = (
                        chunk, ('application', 'octet-stream'),
                        {'filename': mime_filename})
                    if _file_key:
                        req['filekey'] = _file_key
                    try:
                        data = req.submit()['upload']
                        self._uploaddisabled = False
                    except api.APIError as error:
                        # TODO: catch and process foreseeable errors
                        if error.code == u'uploaddisabled':
                            self._uploaddisabled = True
                        elif error.code == u'stashfailed' and \
                                'offset' in error.other:
                            # TODO: Ask MediaWiki to change this
                            # ambiguous error code.
                            new_offset = int(error.other['offset'])
                            # If the offset returned from the server
                            # (the offset it expects now) is equal to
                            # the offset we sent it, there must be
                            # something else that prevented the upload,
                            # instead of simple offset mismatch. This
                            # also prevents infinite loops when we
                            # upload the same chunk again and again,
                            # every time ApiError.
                            if offset != new_offset:
                                pywikibot.log(
                                    'Old offset: {0}; Returned '
                                    'offset: {1}; Chunk size: '
                                    '{2}'.format(offset, new_offset,
                                                 len(chunk)))
                                pywikibot.warning('Attempting to correct '
                                                  'automatically from '
                                                  'offset mismatch error.')
                                offset = new_offset
                                continue
                        raise error
                    if 'nochange' in data:  # in simulation mode
                        break
                    _file_key = data['filekey']
                    if 'warnings' in data and not ignore_all_warnings:
                        if callable(ignore_warnings):
                            restart = False
                            if 'offset' not in data:
                                # This is a result of a warning in the
                                # first chunk. The chunk is not actually
                                # stashed so upload must be restarted if
                                # the warning is allowed.
                                # T112416 and T112405#1637544
                                restart = True
                                data['offset'] = True
                            if ignore_warnings(create_warnings_list(data)):
                                # Future warnings of this run
                                # can be ignored
                                if restart:
                                    return self.upload(
                                        filepage, source_filename,
                                        source_url, comment, text, watch,
                                        True, chunk_size, None, 0,
                                        report_success=False)
                                ignore_warnings = True
                                ignore_all_warnings = True
                                offset = data['offset']
                                continue
                            else:
                                return False
                        result = data
                        if 'offset' not in result:
                            result['offset'] = 0
                        break
                    throttle = False
                    if 'offset' in data:
                        new_offset = int(data['offset'])
                        if offset + len(chunk) != new_offset:
                            pywikibot.log('Old offset: {0}; Returned '
                                          'offset: {1}; Chunk size: '
                                          '{2}'.format(offset, new_offset,
                                                       len(chunk)))
                            pywikibot.warning('Unexpected offset.')
                        offset = new_offset
                    else:
                        pywikibot.warning('Offset was not supplied.')
                        offset += len(chunk)
                    if data['result'] != 'Continue':  # finished
                        pywikibot.log('Finished uploading last chunk.')
                        final_request['filekey'] = _file_key
                        break
            else:  # not chunked upload
                if _file_key:
                    final_request['filekey'] = _file_key
                else:
                    file_contents = f.read()
                    filetype = (mimetypes.guess_type(source_filename)[0] or
                                'application/octet-stream')
                    final_request.mime_params = {
                        'file': (file_contents, filetype.split('/'),
                                 {'filename': mime_filename})
                    }
    else:
        # upload by URL
        if "upload_by_url" not in self.userinfo["rights"]:
            raise Error(
                "User '%s' is not authorized to upload by URL on site %s."
                % (self.user(), self))
        final_request = self._simple_request(
            action='upload', filename=file_page_title,
            url=source_url, comment=comment, text=text, token=token)
    # result is already set when a chunked upload stopped on a warning;
    # otherwise submit the publishing request now.
    if not result:
        final_request['watch'] = watch
        final_request['ignorewarnings'] = ignore_all_warnings
        try:
            result = final_request.submit()
            self._uploaddisabled = False
        except api.APIError as error:
            # TODO: catch and process foreseeable errors
            if error.code == u'uploaddisabled':
                self._uploaddisabled = True
            raise error
        result = result["upload"]
        pywikibot.debug(result, _logger)
    if 'warnings' in result and not ignore_all_warnings:
        if 'filekey' in result:
            _file_key = result['filekey']
        elif 'sessionkey' in result:
            # TODO: Probably needs to be reflected in the API call above
            _file_key = result['sessionkey']
            pywikibot.warning('Using sessionkey instead of filekey.')
        else:
            _file_key = None
            pywikibot.warning('No filekey defined.')
        if not report_success:
            if 'offset' not in result:
                result['offset'] = True
            if ignore_warnings(create_warnings_list(result)):
                return self.upload(
                    filepage, source_filename, source_url, comment, text,
                    watch, True, chunk_size, _file_key,
                    result['offset'], report_success=False)
            else:
                return False
        warn('When ignore_warnings=False in APISite.upload will change '
             'from raising an UploadWarning into behaving like being a '
             'callable returning False.', DeprecationWarning, 3)
        if len(result['warnings']) > 1:
            warn('The upload returned {0} warnings: '
                 '{1}'.format(len(result['warnings']),
                              ', '.join(result['warnings'])),
                 UserWarning, 3)
        warning = list(result["warnings"].keys())[0]
        message = result["warnings"][warning]
        raise api.UploadWarning(warning, upload_warnings[warning]
                                % {'msg': message},
                                file_key=_file_key,
                                offset=result.get('offset', False))
    elif "result" not in result:
        pywikibot.output(u"Upload: unrecognized response: %s" % result)
    if result["result"] == "Success":
        if report_success:
            pywikibot.output(u"Upload successful.")
        # If we receive a nochange, that would mean we're in simulation
        # mode, don't attempt to access imageinfo
        if "nochange" not in result:
            filepage._load_file_revisions([result["imageinfo"]])
    return result['result'] == 'Success'
@deprecated_args(number='total',
                 repeat=None,
                 namespace="namespaces",
                 rcshow=None,
                 rc_show=None,
                 get_redirect=None)
@deprecated_args(step=None)
def newpages(self, user=None, returndict=False,
             start=None, end=None, reverse=False, showBot=False,
             showRedirects=False, excludeuser=None,
             showPatrolled=None, namespaces=None, total=None):
    """Yield new articles (as Page objects) from recent changes.

    Starts with the newest article and fetches the number of articles
    specified in the first argument.

    The objects yielded are dependent on parameter returndict.
    When true, it yields a tuple composed of a Page object and a dict of
    attributes.
    When false, it yields a tuple composed of the Page object,
    timestamp (unicode), length (int), an empty unicode string, username
    or IP address (str), comment (unicode).

    @param namespaces: only iterate pages in these namespaces
    @type namespaces: iterable of basestring or Namespace key,
        or a single instance of those types. May be a '|' separated
        list of namespace identifiers.
    @raises KeyError: a namespace identifier was not resolved
    @raises TypeError: a namespace identifier has an inappropriate
        type such as NoneType or bool
    """
    # TODO: update docstring
    # N.B. API still provides no way to access Special:Newpages content
    # directly, so we get new pages indirectly through 'recentchanges'
    for item in self.recentchanges(
            start=start, end=end, reverse=reverse, namespaces=namespaces,
            changetype="new", user=user, excludeuser=excludeuser,
            showBot=showBot, showRedirects=showRedirects,
            showPatrolled=showPatrolled, total=total):
        page = pywikibot.Page(self, item['title'])
        if returndict:
            yield (page, item)
        else:
            yield (page, item['timestamp'], item['newlen'], u'',
                   item['user'], item['comment'])
@deprecated('APISite.logevents(logtype="upload")')
@deprecated_args(lestart='start', leend='end', leuser='user', letitle=None,
                 repeat=None, number='total', step=None)
def newfiles(self, user=None, start=None, end=None, reverse=False,
             total=None):
    """Yield information about newly uploaded files.

    DEPRECATED: Use logevents(logtype='upload') instead.

    Yields a tuple of FilePage, Timestamp, user(unicode), comment(unicode).

    N.B. the API does not provide direct access to Special:Newimages, so
    this is derived from the "upload" log events instead.
    """
    for event in self.logevents(logtype="upload", user=user, start=start,
                                end=end, reverse=reverse, total=total):
        yield (event.page(), event.timestamp(), event.user(),
               event.comment() or u'')
@deprecated('APISite.logevents(logtype="upload")')
@deprecated_args(number='total', repeat=None)
def newimages(self, *args, **kwargs):
    """
    Yield information about newly uploaded files.

    DEPRECATED: Use logevents(logtype='upload') instead.
    """
    # Thin deprecated alias for newfiles(); forwards all arguments.
    return self.newfiles(*args, **kwargs)
@deprecated_args(number='total', step=None, repeat=None)
def longpages(self, total=None):
    """Yield Pages and lengths from Special:Longpages.

    Yields a tuple of Page object, length(int).

    @param total: number of pages to return
    """
    for item in self._generator(api.ListGenerator, type_arg='querypage',
                                qppage='Longpages', total=total):
        yield (pywikibot.Page(self, item['title']), int(item['value']))
@deprecated_args(number='total', step=None, repeat=None)
def shortpages(self, total=None):
    """Yield Pages and lengths from Special:Shortpages.

    Yields a tuple of Page object, length(int).

    @param total: number of pages to return
    """
    for item in self._generator(api.ListGenerator, type_arg='querypage',
                                qppage='Shortpages', total=total):
        yield (pywikibot.Page(self, item['title']), int(item['value']))
@deprecated_args(number='total', step=None, repeat=None)
def deadendpages(self, total=None):
    """Yield Page objects retrieved from Special:Deadendpages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Deadendpages', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def ancientpages(self, total=None):
    """Yield Pages, datestamps from Special:Ancientpages.

    @param total: number of pages to return
    """
    for item in self._generator(api.ListGenerator, type_arg='querypage',
                                qppage='Ancientpages', total=total):
        yield (pywikibot.Page(self, item['title']),
               pywikibot.Timestamp.fromISOformat(item['timestamp']))
@deprecated_args(number='total', step=None, repeat=None)
def lonelypages(self, total=None):
    """Yield Pages retrieved from Special:Lonelypages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Lonelypages', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def unwatchedpages(self, total=None):
    """Yield Pages from Special:Unwatchedpages (requires Admin privileges).

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Unwatchedpages', total=total)
@deprecated_args(step=None)
def wantedpages(self, total=None):
    """Yield Pages from Special:Wantedpages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Wantedpages', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def wantedcategories(self, total=None):
    """Yield Pages from Special:Wantedcategories.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Wantedcategories', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def uncategorizedcategories(self, total=None):
    """Yield Categories from Special:Uncategorizedcategories.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Uncategorizedcategories', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def uncategorizedimages(self, total=None):
    """Yield FilePages from Special:Uncategorizedimages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Uncategorizedimages', total=total)

# synonym
uncategorizedfiles = uncategorizedimages
@deprecated_args(number='total', step=None, repeat=None)
def uncategorizedpages(self, total=None):
    """Yield Pages from Special:Uncategorizedpages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Uncategorizedpages', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def uncategorizedtemplates(self, total=None):
    """Yield Pages from Special:Uncategorizedtemplates.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Uncategorizedtemplates', total=total)
@deprecated_args(number='total', step=None, repeat=None)
def unusedcategories(self, total=None):
    """Yield Category objects from Special:Unusedcategories.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Unusedcategories', total=total)
@deprecated_args(extension=None, number='total', step=None, repeat=None)
def unusedfiles(self, total=None):
    """Yield FilePage objects from Special:Unusedimages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Unusedimages', total=total)
@deprecated("Site().unusedfiles()")
@deprecated_args(extension=None, number='total', step=None, repeat=None)
def unusedimages(self, total=None):
    """Yield FilePage objects from Special:Unusedimages.

    DEPRECATED: Use L{APISite.unusedfiles} instead.
    """
    # Thin deprecated alias; delegates directly to unusedfiles().
    return self.unusedfiles(total)
@deprecated_args(number='total', step=None, repeat=None)
def withoutinterwiki(self, total=None):
    """Yield Pages without language links from Special:Withoutinterwiki.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Withoutinterwiki', total=total)
@need_version("1.18")
@deprecated_args(step=None)
def broken_redirects(self, total=None):
    """Yield Pages with broken redirects from Special:BrokenRedirects.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='BrokenRedirects', total=total)
@need_version("1.18")
@deprecated_args(step=None)
def double_redirects(self, total=None):
    """Yield Pages with double redirects from Special:DoubleRedirects.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='DoubleRedirects', total=total)
@need_version("1.18")
@deprecated_args(step=None)
def redirectpages(self, total=None):
    """Yield redirect pages from Special:ListRedirects.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='Listredirects', total=total)
@deprecated_args(step=None)
@need_extension('Wikibase Client')
def unconnected_pages(self, total=None):
    """Yield Page objects from Special:UnconnectedPages.

    @param total: number of pages to return
    """
    return self._generator(api.PageGenerator, type_arg='querypage',
                           gqppage='UnconnectedPages', total=total)
@deprecated_args(lvl='level')
def protectedpages(self, namespace=0, type='edit', level=False,
                   total=None):
    """
    Return protected pages depending on protection level and type.

    For protection types which aren't 'create' it uses L{APISite.allpages},
    while it uses for 'create' the 'query+protectedtitles' module.

    @param namespace: The searched namespace.
    @type namespace: int or Namespace or str
    @param type: The protection type to search for (default 'edit').
    @type type: str
    @param level: The protection level (like 'autoconfirmed'). If False it
        shows all protection levels.
    @type level: str or False
    @return: The pages which are protected.
    @rtype: generator of Page
    """
    namespaces = self.namespaces.resolve(namespace)
    # always assert that, so we are be sure that type could be 'create'
    assert 'create' in self.protection_types(), \
        "'create' should be a valid protection type."
    if type != 'create':
        return self.allpages(namespace=namespaces[0], protect_level=level,
                             protect_type=type, total=total)
    if MediaWikiVersion(self.version()) < MediaWikiVersion('1.15'):
        raise NotImplementedError(
            'protectedpages(type=create) requires MW 1.15+')
    return self._generator(api.PageGenerator, type_arg='protectedtitles',
                           namespaces=namespaces, gptlevel=level,
                           total=total)
@need_version('1.21')
def get_property_names(self, force=False):
    """
    Get property names for pages_with_property().

    @param force: force to retrieve userinfo ignoring cache
    @type force: bool
    """
    if not force and hasattr(self, '_property_names'):
        return self._property_names
    # Build and cache the list of known page property names.
    self._property_names = [
        item['propname']
        for item in self._generator(api.ListGenerator, 'pagepropnames')]
    return self._property_names
@need_version('1.21')
def pages_with_property(self, propname, total=None):
    """Yield Page objects from Special:PagesWithProp.

    @param propname: must be a valid property.
    @type propname: str
    @param total: number of pages to return
    @type total: int or None
    @return: return a generator of Page objects
    @rtype: iterator
    """
    # Reject property names the wiki does not know about.
    if propname not in self.get_property_names():
        raise NotImplementedError(
            '"{0}" is not a valid page property'.format(propname))
    return self._generator(api.PageGenerator, type_arg='pageswithprop',
                           gpwppropname=propname, total=total)
@need_version("1.18")
def compare(self, old, diff):
    """
    Corresponding method to the 'action=compare' API action.

    See: https://en.wikipedia.org/w/api.php?action=help&modules=compare
    Use pywikibot.diff's html_comparator() method to parse result.

    @param old: starting revision ID, title, Page, or Revision
    @type old: int, str, pywikibot.Page, or pywikibot.Page.Revision
    @param diff: ending revision ID, title, Page, or Revision
    @type diff: int, str, pywikibot.Page, or pywikibot.Page.Revision
    @return: Returns an HTML string of a diff between two revisions.
    @rtype: str
    """
    def get_param(item):
        # Map the accepted argument types onto ('title'|'rev', value).
        if isinstance(item, basestring):
            return 'title', item
        if isinstance(item, pywikibot.Page):
            return 'title', item.title()
        if isinstance(item, int):
            return 'rev', item
        if isinstance(item, pywikibot.page.Revision):
            return 'rev', item.revid
        return None

    old = get_param(old)
    if not old:
        raise TypeError('old parameter is of invalid type')
    diff = get_param(diff)
    if not diff:
        raise TypeError('diff parameter is of invalid type')

    params = {'action': 'compare'}
    params['from' + old[0]] = old[1]
    params['to' + diff[0]] = diff[1]
    data = self._simple_request(**params).submit()
    return data['compare']['*']
    @need_extension('Thanks')
    def thank_revision(self, revid, source=None):
        """Corresponding method to the 'action=thank' API action.
        @param revid: Revision ID for the revision to be thanked.
        @type revid: int
        @param source: A source for the thanking operation.
        @type source: str
        @raise APIError: On thanking oneself or other API errors.
        @return: The API response.
        """
        # Thanking is a write action, so it needs a CSRF token.
        token = self.tokens['csrf']
        req = self._simple_request(action='thank', rev=revid, token=token,
                                   source=source)
        data = req.submit()
        # The Thanks extension reports success as the integer 1.
        if data['result']['success'] != 1:
            raise api.APIError('Thanking unsuccessful')
        return data
# Flow API calls
@need_extension('Flow')
def load_board(self, page):
"""
Retrieve the data for a Flow board.
@param page: A Flow board
@type page: Board
@return: A dict representing the board's metadata.
@rtype: dict
"""
req = self._simple_request(action='flow', page=page,
submodule='view-topiclist',
vtllimit=1)
data = req.submit()
return data['flow']['view-topiclist']['result']['topiclist']
@need_extension('Flow')
def load_topiclist(self, page, format='wikitext', limit=100,
sortby='newest', toconly=False, offset=None,
offset_id=None, reverse=False, include_offset=False):
"""
Retrieve the topiclist of a Flow board.
@param page: A Flow board
@type page: Board
@param format: The content format to request the data in.
@type format: str (either 'wikitext', 'html', or 'fixed-html')
@param limit: The number of topics to fetch in each request.
@type limit: int
@param sortby: Algorithm to sort topics by.
@type sortby: str (either 'newest' or 'updated')
@param toconly: Whether to only include information for the TOC.
@type toconly: bool
@param offset: The timestamp to start at (when sortby is 'updated').
@type offset: Timestamp or equivalent str
@param offset_id: The topic UUID to start at (when sortby is 'newest').
@type offset_id: str (in the form of a UUID)
@param reverse: Whether to reverse the topic ordering.
@type reverse: bool
@param include_offset: Whether to include the offset topic.
@type include_offset: bool
@return: A dict representing the board's topiclist.
@rtype: dict
"""
if offset:
offset = pywikibot.Timestamp.fromtimestampformat(offset)
offset_dir = reverse and 'rev' or 'fwd'
params = {'action': 'flow', 'submodule': 'view-topiclist',
'page': page,
'vtlformat': format, 'vtlsortby': sortby,
'vtllimit': limit, 'vtloffset-dir': offset_dir,
'vtloffset': offset, 'vtloffset-id': offset_id,
'vtlinclude-offset': include_offset, 'vtltoconly': toconly}
req = self._request(parameters=params)
data = req.submit()
return data['flow']['view-topiclist']['result']['topiclist']
@need_extension('Flow')
def load_topic(self, page, format):
"""
Retrieve the data for a Flow topic.
@param page: A Flow topic
@type page: Topic
@param format: The content format to request the data in.
@type format: str (either 'wikitext', 'html', or 'fixed-html')
@return: A dict representing the topic's data.
@rtype: dict
"""
req = self._simple_request(action='flow', page=page,
submodule='view-topic',
vtformat=format)
data = req.submit()
return data['flow']['view-topic']['result']['topic']
@need_extension('Flow')
def load_post_current_revision(self, page, post_id, format):
"""
Retrieve the data for a post to a Flow topic.
@param page: A Flow topic
@type page: Topic
@param post_id: The UUID of the Post
@type post_id: unicode
@param format: The content format used for the returned content
@type format: unicode (either 'wikitext', 'html', or 'fixed-html')
@return: A dict representing the post data for the given UUID.
@rtype: dict
"""
req = self._simple_request(action='flow', page=page,
submodule='view-post', vppostId=post_id,
vpformat=format)
data = req.submit()
return data['flow']['view-post']['result']['topic']
@must_be('user')
@need_extension('Flow')
def create_new_topic(self, page, title, content, format):
"""
Create a new topic on a Flow board.
@param page: A Flow board
@type page: Board
@param title: The title of the new topic (must be in plaintext)
@type title: unicode
@param content: The content of the topic's initial post
@type content: unicode
@param format: The content format of the value supplied for content
@type format: unicode (either 'wikitext' or 'html')
@return: The metadata of the new topic
@rtype: dict
"""
token = self.tokens['csrf']
params = {'action': 'flow', 'page': page, 'token': token,
'submodule': 'new-topic', 'ntformat': format,
'nttopic': title, 'ntcontent': content}
req = self._request(parameters=params, use_get=False)
data = req.submit()
return data['flow']['new-topic']['committed']['topiclist']
@must_be('user')
@need_extension('Flow')
def reply_to_post(self, page, reply_to_uuid, content, format):
"""Reply to a post on a Flow topic.
@param page: A Flow topic
@type page: Topic
@param reply_to_uuid: The UUID of the Post to create a reply to
@type reply_to_uuid: unicode
@param content: The content of the reply
@type content: unicode
@param format: The content format used for the supplied content
@type format: unicode (either 'wikitext' or 'html')
@return: Metadata returned by the API
@rtype: dict
"""
token = self.tokens['csrf']
params = {'action': 'flow', 'page': page, 'token': token,
'submodule': 'reply', 'repreplyTo': reply_to_uuid,
'repcontent': content, 'repformat': format}
req = self._request(parameters=params, use_get=False)
data = req.submit()
return data['flow']['reply']['committed']['topic']
@must_be('user', 'flow-lock')
@need_extension('Flow')
def lock_topic(self, page, lock, reason):
"""
Lock or unlock a Flow topic.
@param page: A Flow topic
@type page: Topic
@param lock: Whether to lock or unlock the topic
@type lock: bool (True corresponds to locking the topic.)
@param reason: The reason to lock or unlock the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
status = 'lock' if lock else 'unlock'
token = self.tokens['csrf']
params = {'action': 'flow', 'page': page, 'token': token,
'submodule': 'lock-topic', 'cotreason': reason,
'cotmoderationState': status}
req = self._request(parameters=params, use_get=False)
data = req.submit()
return data['flow']['lock-topic']['committed']['topic']
@must_be('user')
@need_extension('Flow')
def moderate_topic(self, page, state, reason):
"""
Moderate a Flow topic.
@param page: A Flow topic
@type page: Topic
@param state: The new moderation state
@type state: str
@param reason: The reason to moderate the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
token = self.tokens['csrf']
params = {'action': 'flow', 'page': page, 'token': token,
'submodule': 'moderate-topic', 'mtreason': reason,
'mtmoderationState': state}
req = self._request(parameters=params, use_get=False)
data = req.submit()
return data['flow']['moderate-topic']['committed']['topic']
@must_be('user', 'flow-delete')
@need_extension('Flow')
def delete_topic(self, page, reason):
"""
Delete a Flow topic.
@param page: A Flow topic
@type page: Topic
@param reason: The reason to delete the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_topic(page, 'delete', reason)
@must_be('user', 'flow-hide')
@need_extension('Flow')
def hide_topic(self, page, reason):
"""
Hide a Flow topic.
@param page: A Flow topic
@type page: Topic
@param reason: The reason to hide the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_topic(page, 'hide', reason)
@must_be('user', 'flow-suppress')
@need_extension('Flow')
def suppress_topic(self, page, reason):
"""
Suppress a Flow topic.
@param page: A Flow topic
@type page: Topic
@param reason: The reason to suppress the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_topic(page, 'suppress', reason)
@must_be('user')
@need_extension('Flow')
def restore_topic(self, page, reason):
"""
Restore a Flow topic.
@param page: A Flow topic
@type page: Topic
@param reason: The reason to restore the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_topic(page, 'restore', reason)
@must_be('user')
@need_extension('Flow')
def moderate_post(self, post, state, reason):
"""
Moderate a Flow post.
@param post: A Flow post
@type post: Post
@param state: The new moderation state
@type state: str
@param reason: The reason to moderate the topic
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
page = post.page
uuid = post.uuid
token = self.tokens['csrf']
params = {'action': 'flow', 'page': page, 'token': token,
'submodule': 'moderate-post', 'mpreason': reason,
'mpmoderationState': state, 'mppostId': uuid}
req = self._request(parameters=params, use_get=False)
data = req.submit()
return data['flow']['moderate-post']['committed']['topic']
@must_be('user', 'flow-delete')
@need_extension('Flow')
def delete_post(self, post, reason):
"""
Delete a Flow post.
@param post: A Flow post
@type post: Post
@param reason: The reason to delete the post
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_post(post, 'delete', reason)
@must_be('user', 'flow-hide')
@need_extension('Flow')
def hide_post(self, post, reason):
"""
Hide a Flow post.
@param post: A Flow post
@type post: Post
@param reason: The reason to hide the post
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_post(post, 'hide', reason)
@must_be('user', 'flow-suppress')
@need_extension('Flow')
def suppress_post(self, post, reason):
"""
Suppress a Flow post.
@param post: A Flow post
@type post: Post
@param reason: The reason to suppress the post
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_post(post, 'suppress', reason)
@must_be('user')
@need_extension('Flow')
def restore_post(self, post, reason):
"""
Restore a Flow post.
@param post: A Flow post
@type post: Post
@param reason: The reason to restore the post
@type reason: unicode
@return: Metadata returned by the API
@rtype: dict
"""
return self.moderate_post(post, 'restore', reason)
    @deprecated_args(step=None)
    def watched_pages(self, sysop=False, force=False, total=None):
        """
        Return watchlist.
        @param sysop: Returns watchlist of sysop user if true
        @type sysop: bool
        @param force: Reload watchlist (bypass the API response cache)
        @type force: bool
        @param total: Maximum number of pages to fetch; falls back to
            config.special_page_limit when not given (or given as 0).
        @type total: int or None
        @return: list of pages in watchlist
        @rtype: list of pywikibot.Page objects
        """
        self.login(sysop=sysop)
        if not total:
            total = pywikibot.config.special_page_limit
        # A None expiry disables caching, forcing a fresh API request.
        expiry = None if force else pywikibot.config.API_config_expiry
        gen = api.PageGenerator(site=self, generator='watchlistraw',
                                expiry=expiry, gwrlimit=total)
        return gen
# aliases for backwards compatibility
isBlocked = redirect_func(is_blocked, old_name='isBlocked',
class_name='APISite')
isAllowed = redirect_func(has_right, old_name='isAllowed',
class_name='APISite')
class DataSite(APISite):
"""Wikibase data capable site."""
    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(DataSite, self).__init__(*args, **kwargs)
        # Lazily resolved by _cache_entity_namespaces(): None means
        # "not looked up yet"; False means "looked up, none found".
        self._item_namespace = None
        self._property_namespace = None
def _cache_entity_namespaces(self):
"""Find namespaces for each known wikibase entity type."""
self._item_namespace = False
self._property_namespace = False
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == 'wikibase-item':
self._item_namespace = namespace
elif content_model == 'wikibase-property':
self._property_namespace = namespace
@property
def item_namespace(self):
"""
Return namespace for items.
@return: item namespace
@rtype: Namespace
"""
if self._item_namespace is None:
self._cache_entity_namespaces()
if isinstance(self._item_namespace, Namespace):
return self._item_namespace
else:
raise EntityTypeUnknownException(
'%r does not support entity type "item"'
% self)
@property
def property_namespace(self):
"""
Return namespace for properties.
@return: property namespace
@rtype: Namespace
"""
if self._property_namespace is None:
self._cache_entity_namespaces()
if isinstance(self._property_namespace, Namespace):
return self._property_namespace
else:
raise EntityTypeUnknownException(
'%r does not support entity type "property"'
% self)
@property
@need_version("1.28-wmf.3")
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
@return: sparql endpoint url
@rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version("1.28-wmf.23")
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
@return: concept base uri
@rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
    def _get_baserevid(self, claim, baserevid):
        """Check that claim.on_item is set and matches baserevid if used."""
        if not claim.on_item:
            # A claim is expected to know its item; warn regardless of
            # whether a baserevid was supplied.
            issue_deprecation_warning('claim without on_item set', None, 3)
            if not baserevid:
                warn('Neither claim.on_item nor baserevid provided',
                     UserWarning, 3)
            return baserevid
        if not baserevid:
            # Preferred path: derive the revision from the claim's item.
            return claim.on_item.latest_revision_id
        # An explicit baserevid is deprecated in favour of claim.on_item.
        issue_deprecation_warning(
            'Site method with baserevid', 'claim with on_item set', 3)
        if baserevid != claim.on_item.latest_revision_id:
            # The caller-supplied value wins, but flag the mismatch.
            warn('Using baserevid {0} instead of claim baserevid {1}'
                 ''.format(baserevid, claim.on_item.latest_revision_id),
                 UserWarning, 3)
        return baserevid
    def __getattr__(self, attr):
        """
        Provide data access methods.
        Methods provided are get_info, get_sitelinks, get_aliases,
        get_labels, get_descriptions, and get_urls.
        """
        # Attributes actually defined on the class take precedence.
        if hasattr(self.__class__, attr):
            return getattr(self.__class__, attr)
        if attr.startswith("get_"):
            # NOTE(review): replace() strips every 'get_' substring, not
            # just the prefix; harmless for the whitelisted names below.
            props = attr.replace("get_", "")
            if props in ['info', 'sitelinks', 'aliases', 'labels',
                         'descriptions', 'urls']:
                # These dynamic getters are deprecated in favour of the
                # WikibasePage accessors.
                issue_deprecation_warning('DataSite.{0}()'.format(attr),
                                          'WikibasePage', 2)
                if props == 'urls':
                    props = 'sitelinks/urls'
                method = self._get_propertyitem
                # Bind the property name while preserving the docstring.
                f = functools.partial(method, props)
                if hasattr(method, "__doc__"):
                    f.__doc__ = method.__doc__
                return f
        return super(APISite, self).__getattr__(attr)
def _get_propertyitem(self, props, source, **params):
"""Generic method to get the data for multiple Wikibase items."""
wbdata = self._get_item(source, props=props, **params)
if props == 'info':
return wbdata
if props == 'sitelinks/urls':
props = 'sitelinks'
assert props in wbdata, \
"API wbgetentities response lacks %s key" % props
return wbdata[props]
@deprecated("pywikibot.WikibasePage")
def get_item(self, source, **params):
"""Get the data for multiple Wikibase items."""
return self._get_item(source, **params)
# Only separated from get_item to avoid the deprecation message via
# _get_propertyitem
def _get_item(self, source, **params):
assert set(params) <= set(['props']), \
'Only "props" is a valid kwarg, not {0}'.format(set(params) -
set(['props']))
if isinstance(source, int) or \
isinstance(source, basestring) and source.isdigit():
ids = 'q' + str(source)
params = merge_unique_dicts(params, action='wbgetentities',
ids=ids)
wbrequest = self._simple_request(**params)
wbdata = wbrequest.submit()
assert 'success' in wbdata, \
"API wbgetentities response lacks 'success' key"
assert wbdata['success'] == 1, "API 'success' key is not 1"
assert 'entities' in wbdata, \
"API wbgetentities response lacks 'entities' key"
if ids.upper() in wbdata['entities']:
ids = ids.upper()
assert ids in wbdata['entities'], \
"API wbgetentities response lacks %s key" % ids
return wbdata['entities'][ids]
else:
# not implemented yet
raise NotImplementedError
def data_repository(self):
"""
Override parent method.
This avoids pointless API queries since the data repository
is this site by definition.
@return: this Site object
@rtype: DataSite
"""
return self
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
# Do this via API instead when T162561 is implemented.
code, fam = self.shared_geo_shape_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username())
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
# Do this via API instead when T164413 is implemented.
code, fam = self.shared_tabular_data_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username())
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
@param identification: Parameters used to identify the page(s)
@type identification: dict
@param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self._simple_request(**params)
data = req.submit()
if 'success' not in data:
raise api.APIError(data['errors'])
return data['entities']
def preloaditempages(self, pagelist, groupsize=50):
"""
Yield ItemPages with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
@param pagelist: an iterable that yields either WikibasePage objects,
or Page objects linked to an ItemPage.
@param groupsize: how many pages to query at a time
@type groupsize: int
"""
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibasePage):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
if (p.site == p.site.data_repository() and
p.namespace() == p.data_repository.item_namespace):
req['ids'].append(p.title(withNamespace=False))
else:
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self._simple_request(action='wbgetentities', **req)
data = req.submit()
for qid in data['entities']:
item = pywikibot.ItemPage(self, qid)
item._content = data['entities'][qid]
# No api call is made because item._content is given
item.get(get_redirect=True)
yield item
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@must_be(group='user')
def editEntity(self, identification, data, bot=True, **kwargs):
"""
Edit entity.
@param identification: API parameters to use for entity identification
@type identification: dict
@param data: data updates
@type data: dict
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@return: New entity data
@rtype: dict
"""
if "id" in identification and identification["id"] == "-1":
del identification["id"]
params = dict(**identification)
if not params: # If no identification was provided
params['new'] = 'item' # TODO create properties+queries
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'data', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {0} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self._simple_request(**params)
data = req.submit()
return data
    @must_be(group='user')
    def addClaim(self, item, claim, bot=True, summary=None):
        """
        Add a claim.
        @param item: Entity to modify
        @type item: WikibasePage
        @param claim: Claim to be added
        @type claim: Claim
        @param bot: Whether to mark the edit as a bot edit
        @type bot: bool
        @param summary: Edit summary
        @type summary: str
        """
        params = {'action': 'wbcreateclaim', 'entity': item.getID(),
                  'baserevid': item.latest_revision_id,
                  'snaktype': claim.getSnakType(), 'property': claim.getID(),
                  'summary': summary, 'bot': bot}
        # Only 'value' snaks carry an actual data value.
        if claim.getSnakType() == 'value':
            params['value'] = json.dumps(claim._formatValue())
        params['token'] = self.tokens['edit']
        req = self._simple_request(**params)
        data = req.submit()
        # Returns nothing; instead the passed-in claim and item objects
        # are updated in place to mirror the new server-side state.
        claim.snak = data['claim']['id']
        # Update the item
        if claim.getID() in item.claims:
            item.claims[claim.getID()].append(claim)
        else:
            item.claims[claim.getID()] = [claim]
        item.latest_revision_id = data['pageinfo']['lastrevid']
@must_be(group='user')
def changeClaimTarget(self, claim, snaktype='value',
bot=True, summary=None):
"""
Set the claim target to the value of the provided claim target.
@param claim: The source of the claim target value
@type claim: Claim
@param snaktype: An optional snaktype. Default: 'value'
@type snaktype: str ('value', 'novalue' or 'somevalue')
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@param summary: Edit summary
@type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPage(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self._simple_request(**params)
data = req.submit()
return data
@must_be(group='user')
def save_claim(self, claim, summary=None, bot=True):
"""
Save the whole claim to the wikibase site.
@param claim: The claim to save
@type claim: Claim
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@param summary: Edit summary
@type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPage(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self._simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
    @must_be(group='user')
    def editSource(self, claim, source, new=False,
                   bot=True, summary=None, baserevid=None):
        """
        Create/Edit a source.
        @param claim: A Claim object to add the source to
        @type claim: Claim
        @param source: A Claim object to be used as a source
        @type source: Claim
        @param new: Whether to create a new one if the "source" already exists
        @type new: bool
        @param bot: Whether to mark the edit as a bot edit
        @type bot: bool
        @param summary: Edit summary
        @type summary: str
        @param baserevid: Base revision id override, used to detect conflicts.
        When omitted, revision of claim.on_item is used. DEPRECATED.
        @type baserevid: long
        """
        if claim.isReference or claim.isQualifier:
            raise ValueError("The claim cannot have a source.")
        params = {'action': 'wbsetreference', 'statement': claim.snak,
                  'baserevid': self._get_baserevid(claim, baserevid),
                  'summary': summary, 'bot': bot, 'token': self.tokens['edit']}
        # build up the snak
        if isinstance(source, list):
            sources = source
        else:
            sources = [source]
        # Group the value snaks by property id, as the API expects.
        snak = {}
        for sourceclaim in sources:
            datavalue = sourceclaim._formatDataValue()
            valuesnaks = []
            if sourceclaim.getID() in snak:
                valuesnaks = snak[sourceclaim.getID()]
            valuesnaks.append({'snaktype': 'value',
                               'property': sourceclaim.getID(),
                               'datavalue': datavalue,
                               },
                              )
            snak[sourceclaim.getID()] = valuesnaks
            # set the hash if the source should be changed.
            # if present, all claims of one source have the same hash
            # (so repeatedly overwriting params['reference'] inside the
            # loop is harmless: every iteration writes the same hash).
            if not new and hasattr(sourceclaim, 'hash'):
                params['reference'] = sourceclaim.hash
        params['snaks'] = json.dumps(snak)
        req = self._simple_request(**params)
        data = req.submit()
        return data
@must_be(group='user')
def editQualifier(self, claim, qualifier, new=False, bot=True,
summary=None, baserevid=None):
"""
Create/Edit a qualifier.
@param claim: A Claim object to add the qualifier to
@type claim: Claim
@param qualifier: A Claim object to be used as a qualifier
@type qualifier: Claim
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@param summary: Edit summary
@type summary: str
@param baserevid: Base revision id override, used to detect conflicts.
When omitted, revision of claim.on_item is used. DEPRECATED.
@type baserevid: long
"""
if claim.isReference or claim.isQualifier:
raise ValueError("The claim cannot have a qualifier.")
params = {'action': 'wbsetqualifier', 'claim': claim.snak,
'baserevid': self._get_baserevid(claim, baserevid),
'summary': summary, 'bot': bot}
if (not new and
hasattr(qualifier, 'hash') and
qualifier.hash is not None):
params['snakhash'] = qualifier.hash
params['token'] = self.tokens['edit']
# build up the snak
if qualifier.getSnakType() == 'value':
params['value'] = json.dumps(qualifier._formatValue())
params['snaktype'] = qualifier.getSnakType()
params['property'] = qualifier.getID()
req = self._simple_request(**params)
data = req.submit()
return data
    @must_be(group='user')
    def removeClaims(self, claims, bot=True, summary=None, baserevid=None):
        """
        Remove claims.
        @param claims: Claims to be removed
        @type claims: list of Claim
        @param bot: Whether to mark the edit as a bot edit
        @type bot: bool
        @param summary: Edit summary
        @type summary: str
        @param baserevid: Base revision id override, used to detect conflicts.
        When omitted, revision of claim.on_item is used. DEPRECATED.
        @type baserevid: long
        """
        # Check on_item vs baserevid for all additional claims
        # (the first iteration may replace a missing baserevid with the
        # claim's latest revision id; later claims are checked against it).
        for claim in claims:
            baserevid = self._get_baserevid(claim, baserevid)
        # All claims must belong to one and the same item.
        items = set(claim.on_item for claim in claims if claim.on_item)
        assert len(items) == 1
        params = {
            'action': 'wbremoveclaims', 'baserevid': baserevid,
            'summary': summary,
            'bot': bot,
            # Multiple claim GUIDs are pipe-separated per API convention.
            'claim': '|'.join(claim.snak for claim in claims),
            'token': self.tokens['edit'],
        }
        req = self._simple_request(**params)
        data = req.submit()
        return data
@must_be(group='user')
def removeSources(self, claim, sources,
bot=True, summary=None, baserevid=None):
"""
Remove sources.
@param claim: A Claim object to remove the sources from
@type claim: Claim
@param sources: A list of Claim objects that are sources
@type sources: Claim
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@param summary: Edit summary
@type summary: str
@param baserevid: Base revision id override, used to detect conflicts.
When omitted, revision of claim.on_item is used. DEPRECATED.
@type baserevid: long
"""
params = {
'action': 'wbremovereferences',
'baserevid': self._get_baserevid(claim, baserevid),
'summary': summary, 'bot': bot,
'statement': claim.snak,
'references': '|'.join(source.hash for source in sources),
'token': self.tokens['edit'],
}
req = self._simple_request(**params)
data = req.submit()
return data
@must_be(group='user')
def remove_qualifiers(self, claim, qualifiers,
bot=True, summary=None, baserevid=None):
"""
Remove qualifiers.
@param claim: A Claim object to remove the qualifier from
@type claim: Claim
@param qualifiers: Claim objects currently used as a qualifiers
@type qualifiers: list of Claim
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@param summary: Edit summary
@type summary: str
@param baserevid: Base revision id override, used to detect conflicts.
When omitted, revision of claim.on_item is used. DEPRECATED.
@type baserevid: long
"""
params = {
'action': 'wbremovequalifiers',
'claim': claim.snak,
'baserevid': self._get_baserevid(claim, baserevid),
'summary': summary,
'bot': bot,
'qualifiers': [qualifier.hash for qualifier in qualifiers],
'token': self.tokens['edit']
}
req = self._simple_request(**params)
data = req.submit()
return data
    @must_be(group='user')
    def linkTitles(self, page1, page2, bot=True):
        """
        Link two pages together.
        @param page1: First page to link
        @type page1: pywikibot.Page
        @param page2: Second page to link
        @type page2: pywikibot.Page
        @param bot: Whether to mark the edit as a bot edit
        @type bot: bool
        @return: dict API output
        @rtype: dict
        """
        # NOTE(review): page1 is sent as the 'to' side and page2 as the
        # 'from' side of wblinktitles -- looks intentional since the
        # operation is symmetric, but confirm against callers.
        params = {
            'action': 'wblinktitles',
            'tosite': page1.site.dbName(),
            'totitle': page1.title(),
            'fromsite': page2.site.dbName(),
            'fromtitle': page2.title(),
            'token': self.tokens['edit']
        }
        if bot:
            params['bot'] = 1
        req = self._simple_request(**params)
        data = req.submit()
        return data
@must_be(group='user')
@deprecated_args(ignoreconflicts='ignore_conflicts')
def mergeItems(self, fromItem, toItem, ignore_conflicts=None,
summary=None, bot=True):
"""
Merge two items together.
@param fromItem: Item to merge from
@type fromItem: pywikibot.ItemPage
@param toItem: Item to merge into
@type toItem: pywikibot.ItemPage
@param ignore_conflicts: Which type of conflicts
('description', 'sitelink', and 'statement')
should be ignored
@type ignore_conflicts: list of str
@param summary: Edit summary
@type summary: str
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@return: dict API output
@rtype: dict
"""
params = {
'action': 'wbmergeitems',
'fromid': fromItem.getID(),
'toid': toItem.getID(),
'ignoreconflicts': ignore_conflicts,
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self._simple_request(**params)
data = req.submit()
return data
@must_be(group='user')
def set_redirect_target(self, from_item, to_item):
"""
Make a redirect to another item.
@param to_item: title of target item.
@type to_item: pywikibot.ItemPage
@param from_item: Title of the item to be redirected.
@type from_item: pywikibot.ItemPage
"""
params = {
'action': 'wbcreateredirect',
'from': from_item.getID(),
'to': to_item.getID(),
'token': self.tokens['edit']
}
req = self._simple_request(**params)
data = req.submit()
return data
@must_be(group='user')
def createNewItemFromPage(self, page, bot=True, **kwargs):
"""
Create a new Wikibase item for a provided page.
@param page: page to fetch links from
@type page: pywikibot.Page
@param bot: Whether to mark the edit as a bot edit
@type bot: bool
@return: pywikibot.ItemPage of newly created item
@rtype: pywikibot.ItemPage
"""
sitelinks = {
page.site.dbName(): {
'site': page.site.dbName(),
'title': page.title(),
}
}
labels = {
page.site.lang: {
'language': page.site.lang,
'value': page.title(),
}
}
for link in page.iterlanglinks():
sitelinks[link.site.dbName()] = {
'site': link.site.dbName(),
'title': link.title,
}
labels[link.site.lang] = {
'language': link.site.lang,
'value': link.title,
}
data = {
'sitelinks': sitelinks,
'labels': labels,
}
result = self.editEntity({}, data, bot=bot, **kwargs)
return pywikibot.ItemPage(self, result['entity']['id'])
def search_entities(self, search, language, limit=None, **kwargs):
"""
Search for pages or properties that contain the given text.
@param search: Text to find.
@type search: str
@param language: Language to search in.
@type language: str
@param limit: Maximum number of pages to retrieve in total, or None in
case of no limit.
@type limit: int or None
@return: 'search' list from API output.
@rtype: api.APIGenerator
"""
lang_codes = [lang['code'] for lang in self._siteinfo.get('languages')]
if language not in lang_codes:
raise ValueError(u'Data site used does not support provided '
u'language.')
if 'site' in kwargs:
if kwargs['site'].sitename != self.sitename:
raise ValueError('The site given in the kwargs is different.')
else:
warn('search_entities should not get a site via kwargs.',
UserWarning, 2)
del kwargs['site']
parameters = dict(search=search, language=language, **kwargs)
gen = api.APIGenerator('wbsearchentities', data_name='search',
site=self, parameters=parameters)
if limit is not None:
gen.set_maximum_items(limit)
return gen
| {
"content_hash": "8f8ffbd46b2ec261c78a302d8e0e31e6",
"timestamp": "",
"source": "github",
"line_count": 7946,
"max_line_length": 92,
"avg_line_length": 40.29272589982381,
"alnum_prop": 0.5546872559859573,
"repo_name": "npdoty/pywikibot",
"id": "2e282d1fd0de8e5f24b57196cb72bd1b0aa3db05",
"size": "320190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywikibot/site.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4485564"
}
],
"symlink_target": ""
} |
from ..broker import Broker
class AuthServerBroker(Broker):
controller = "auth_servers"
def show(self, **kwargs):
    """Show the details for the specified auth server.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: The authentication server identifier.
    :type id: Integer

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return auth_server: The auth server identified by the specified id.
    :rtype auth_server: AuthServer

    """
    method_name = self._get_method_fullname("show")
    return self.api_request(method_name, kwargs)
def index(self, **kwargs):
    """List the available auth servers.

    Any of the inputs listed may be used to narrow the list; other
    inputs will be ignored. Of the various ways to query lists, using
    this method is most efficient.

    :param auth_service_id: The id of the authentication service this
        server is a member of. (Array of Integer; api version min 2.5)
    :param id: The authentication server identifier.
        (Array of Integer; api version min 2.5)
    :param start: The record number to return in the selected page of
        data. It will always appear, although it may not be the first
        record. (Integer; default 0)
    :param limit: The size of the page of data, that is, the maximum
        number of records returned. The maximum limit is 10000.
        (Integer; default 1000)
    :param sort: The data field(s) to use for sorting the output.
        Valid values are id, priority, enabled_ind, auth_server,
        auth_port, auth_shared_secret, auth_encryption, auth_cert,
        created_at, updated_at, secure_version, auth_service_id,
        auth_protocol, source_interface_id, auth_version.
        (Array of String; default id)
    :param dir: The direction(s) in which to sort the data. Valid
        values are 'asc' and 'desc'. (Array of String; default asc)
    :param select: The list of attributes to return for each
        AuthServer. If empty or omitted, all attributes will be
        returned. (Array)
    :param goto_field: The field name for NIOS GOTO that is used for
        locating a row position of records. (String; api version
        min 2.8)
    :param goto_value: The value of goto_field for NIOS GOTO that is
        used for locating a row position of records. (String; api
        version min 2.8)

    :return auth_servers: An array of the AuthServer objects that
        match the specified input criteria.
    :rtype auth_servers: Array of AuthServer

    """
    method_name = self._get_method_fullname("index")
    return self.api_list_request(method_name, kwargs)
def search(self, **kwargs):
    """List the available auth servers matching the input criteria.

    This method provides a more flexible search interface than the
    index method, but searching using this method is more demanding on
    the system and will not perform to the same level as the index
    method. The input fields listed below will be used as in the index
    method, to filter the result, along with the optional query string
    and XML filter described below.

    Unless noted otherwise, each filter input below is optional and
    available from api version 2.5:

    :param auth_cert: The SSL certificate of an Authentication Server
        (required for Active Directory method). (Array of String)
    :param auth_encryption: The Encryption method (none or SSL)
        (required for Active Directory method). (Array of String)
    :param auth_port: Authentication Port (required for Active
        Directory method). (Array of Integer)
    :param auth_protocol: The password exchange protocol to use for
        authentication. One of (PAP, CHAP). (Array of String)
    :param auth_server: Authentication Server Name (required for
        Radius, Tacacs, LDAP and Active Directory methods).
        (Array of String)
    :param auth_service_id: The id of the authentication service this
        server is a member of. (Array of Integer)
    :param auth_shared_secret: The shared secret of an authentication
        server (required for Radius and Tacacs methods).
        (Array of String)
    :param auth_version: The version used for the authentication
        (LDAP). (Array of Integer)
    :param created_at: The date and time the record was initially
        created in NetMRI. (Array of DateTime)
    :param enabled_ind: A flag indicating whether the authentication
        server settings is enabled or disabled. (Array of Boolean)
    :param id: The authentication server identifier.
        (Array of Integer)
    :param priority: Priority assigned to an authentication server.
        (Array of Integer)
    :param secure_version: Internal encrypt version used for any
        auth_shared_secret. (Array of Integer)
    :param source_interface_id: The NetMRI interface to use as source
        of the packets sent to the authentication server.
        (Array of Integer)
    :param updated_at: The date and time the record was last modified
        in NetMRI. (Array of DateTime)

    Paging and output control:

    :param start: The record number to return in the selected page of
        data. It will always appear, although it may not be the first
        record. (Integer; default 0)
    :param limit: The size of the page of data, that is, the maximum
        number of records returned. The maximum limit is 10000.
        (Integer; default 1000)
    :param sort: The data field(s) to use for sorting the output.
        Valid values are id, priority, enabled_ind, auth_server,
        auth_port, auth_shared_secret, auth_encryption, auth_cert,
        created_at, updated_at, secure_version, auth_service_id,
        auth_protocol, source_interface_id, auth_version.
        (Array of String; default id)
    :param dir: The direction(s) in which to sort the data. Valid
        values are 'asc' and 'desc'. (Array of String; default asc)
    :param select: The list of attributes to return for each
        AuthServer. If empty or omitted, all attributes will be
        returned. (Array)
    :param goto_field: The field name for NIOS GOTO that is used for
        locating a row position of records. (String; api version
        min 2.8)
    :param goto_value: The value of goto_field for NIOS GOTO that is
        used for locating a row position of records. (String; api
        version min 2.8)
    :param query: This value will be matched against auth servers,
        looking to see if one or more of the listed attributes contain
        the passed value. You may also surround the value with '/' and
        '/' to perform a regular expression search rather than a
        containment operation. Any record that matches will be
        returned. The attributes searched are: auth_cert,
        auth_encryption, auth_port, auth_protocol, auth_server,
        auth_service_id, auth_shared_secret, auth_version, created_at,
        enabled_ind, id, priority, secure_version,
        source_interface_id, updated_at. (String)
    :param xml_filter: A SetFilter XML structure to further refine the
        search. The SetFilter will be applied AFTER any search query
        or field values, but before any limit options. The limit and
        pagination will be enforced after the filter. Remind that this
        kind of filter may be costly and inefficient if not associated
        with a database filtering. (String; api version min 2.3)

    :return auth_servers: An array of the AuthServer objects that
        match the specified input criteria.
    :rtype auth_servers: Array of AuthServer

    """
    method_name = self._get_method_fullname("search")
    return self.api_list_request(method_name, kwargs)
def find(self, **kwargs):
"""Lists the available auth servers matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: auth_cert, auth_encryption, auth_port, auth_protocol, auth_server, auth_service_id, auth_shared_secret, auth_version, created_at, enabled_ind, id, priority, secure_version, source_interface_id, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_cert: The operator to apply to the field auth_cert. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_cert: The SSL certificate of an Authentication Server. (Required for Active Directory method). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_cert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_cert: If op_auth_cert is specified, the field named in this input will be compared to the value in auth_cert using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_cert must be specified if op_auth_cert is specified.
:type val_f_auth_cert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_cert: If op_auth_cert is specified, this value will be compared to the value in auth_cert using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_cert must be specified if op_auth_cert is specified.
:type val_c_auth_cert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_encryption: The operator to apply to the field auth_encryption. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_encryption: The Encryption method (none or SSL) (required for Active Directory method). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_encryption: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_encryption: If op_auth_encryption is specified, the field named in this input will be compared to the value in auth_encryption using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_encryption must be specified if op_auth_encryption is specified.
:type val_f_auth_encryption: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_encryption: If op_auth_encryption is specified, this value will be compared to the value in auth_encryption using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_encryption must be specified if op_auth_encryption is specified.
:type val_c_auth_encryption: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_port: The operator to apply to the field auth_port. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_port: Authentication Port (required for Active Directory method). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_port: If op_auth_port is specified, the field named in this input will be compared to the value in auth_port using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_port must be specified if op_auth_port is specified.
:type val_f_auth_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_port: If op_auth_port is specified, this value will be compared to the value in auth_port using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_port must be specified if op_auth_port is specified.
:type val_c_auth_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_protocol: The operator to apply to the field auth_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_protocol: The password exchange protocol to use for authentication. One of (PAP, CHAP) For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_protocol: If op_auth_protocol is specified, the field named in this input will be compared to the value in auth_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_protocol must be specified if op_auth_protocol is specified.
:type val_f_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_protocol: If op_auth_protocol is specified, this value will be compared to the value in auth_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_protocol must be specified if op_auth_protocol is specified.
:type val_c_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_server: The operator to apply to the field auth_server. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_server: Authentication Server Name (required for Radius, Tacacs, LDAP and Active Directory methods). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_server: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_server: If op_auth_server is specified, the field named in this input will be compared to the value in auth_server using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_server must be specified if op_auth_server is specified.
:type val_f_auth_server: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_server: If op_auth_server is specified, this value will be compared to the value in auth_server using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_server must be specified if op_auth_server is specified.
:type val_c_auth_server: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_service_id: The operator to apply to the field auth_service_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_service_id: The id of the authentication service, this server is member of. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_service_id: If op_auth_service_id is specified, the field named in this input will be compared to the value in auth_service_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_service_id must be specified if op_auth_service_id is specified.
:type val_f_auth_service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_service_id: If op_auth_service_id is specified, this value will be compared to the value in auth_service_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_service_id must be specified if op_auth_service_id is specified.
:type val_c_auth_service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_shared_secret: The operator to apply to the field auth_shared_secret. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_shared_secret: The shared secret of an authentication server (required for Radius and Tacacs methods). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_shared_secret: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_shared_secret: If op_auth_shared_secret is specified, the field named in this input will be compared to the value in auth_shared_secret using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_shared_secret must be specified if op_auth_shared_secret is specified.
:type val_f_auth_shared_secret: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_shared_secret: If op_auth_shared_secret is specified, this value will be compared to the value in auth_shared_secret using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_shared_secret must be specified if op_auth_shared_secret is specified.
:type val_c_auth_shared_secret: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_version: The operator to apply to the field auth_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_version: The version used for the authentication (LDAP). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_version: If op_auth_version is specified, the field named in this input will be compared to the value in auth_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_version must be specified if op_auth_version is specified.
:type val_f_auth_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_version: If op_auth_version is specified, this value will be compared to the value in auth_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_version must be specified if op_auth_version is specified.
:type val_c_auth_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_enabled_ind: The operator to apply to the field enabled_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. enabled_ind: A flag indicating whether the authentication server settings is enabled or disabled. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_enabled_ind: If op_enabled_ind is specified, the field named in this input will be compared to the value in enabled_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_enabled_ind must be specified if op_enabled_ind is specified.
:type val_f_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_enabled_ind: If op_enabled_ind is specified, this value will be compared to the value in enabled_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_enabled_ind must be specified if op_enabled_ind is specified.
:type val_c_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The authentication server identifier. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_priority: The operator to apply to the field priority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. priority: Priority assigned to an authentication server. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_priority: If op_priority is specified, the field named in this input will be compared to the value in priority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_priority must be specified if op_priority is specified.
:type val_f_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_priority: If op_priority is specified, this value will be compared to the value in priority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_priority must be specified if op_priority is specified.
:type val_c_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_secure_version: The operator to apply to the field secure_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. secure_version: Internal encrypt version used for any auth_shared_secret For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_secure_version: If op_secure_version is specified, the field named in this input will be compared to the value in secure_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_secure_version must be specified if op_secure_version is specified.
:type val_f_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_secure_version: If op_secure_version is specified, this value will be compared to the value in secure_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_secure_version must be specified if op_secure_version is specified.
:type val_c_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_source_interface_id: The operator to apply to the field source_interface_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. source_interface_id: The NetMRI interface to use as source of the packets sent to the authentication server. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_source_interface_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_source_interface_id: If op_source_interface_id is specified, the field named in this input will be compared to the value in source_interface_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_source_interface_id must be specified if op_source_interface_id is specified.
:type val_f_source_interface_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_source_interface_id: If op_source_interface_id is specified, this value will be compared to the value in source_interface_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_source_interface_id must be specified if op_source_interface_id is specified.
:type val_c_source_interface_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, priority, enabled_ind, auth_server, auth_port, auth_shared_secret, auth_encryption, auth_cert, created_at, updated_at, secure_version, auth_service_id, auth_protocol, source_interface_id, auth_version.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthServer. Valid values are id, priority, enabled_ind, auth_server, auth_port, auth_shared_secret, auth_encryption, auth_cert, created_at, updated_at, secure_version, auth_service_id, auth_protocol, source_interface_id, auth_version. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_servers: An array of the AuthServer objects that match the specified input criteria.
:rtype auth_servers: Array of AuthServer
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
    def update(self, **kwargs):
        """Updates an existing auth server.

            **Inputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` True
            |  ``default:`` None

            :param id: The authentication server identifier.
            :type id: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_cert: The SSL certificate of an Authentication Server. (Required for Active Directory method). If omitted, this field will not be updated.
            :type auth_cert: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_encryption: The Encryption method (none or SSL) (required for Active Directory method). If omitted, this field will not be updated.
            :type auth_encryption: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_port: Authentication Port (required for Active Directory method). If omitted, this field will not be updated.
            :type auth_port: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_protocol: The password exchange protocol to use for authentication. One of (PAP, CHAP) If omitted, this field will not be updated.
            :type auth_protocol: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_server: Authentication Server Name (required for Radius, Tacacs, LDAP and Active Directory methods). If omitted, this field will not be updated.
            :type auth_server: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_service_id: The id of the authentication service, this server is member of. If omitted, this field will not be updated.
            :type auth_service_id: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_shared_secret: The shared secret of an authentication server (required for Radius and Tacacs methods). If omitted, this field will not be updated.
            :type auth_shared_secret: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param auth_version: The version used for the authentication (LDAP). If omitted, this field will not be updated.
            :type auth_version: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` True

            :param enabled_ind: A flag indicating whether the authentication server settings is enabled or disabled. If omitted, this field will be updated to the default value.
            :type enabled_ind: Boolean

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param priority: Priority assigned to an authentication server. If omitted, this field will not be updated.
            :type priority: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param source_interface_id: The NetMRI interface to use as source of the packets sent to the authentication server. If omitted, this field will not be updated.
            :type source_interface_id: Integer

            **Outputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return id: The id of the updated auth server.
            :rtype id: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return model: The class name of the updated auth server.
            :rtype model: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return uri: A URI that may be used to retrieve the updated auth server.
            :rtype uri: String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return auth_server: The updated auth server.
            :rtype auth_server: AuthServer
        """
        # Delegates to the broker's generic request mechanism; the "update"
        # method name is resolved against this broker's API namespace and the
        # raw kwargs are forwarded unchanged.
        return self.api_request(self._get_method_fullname("update"), kwargs)
    def destroy(self, **kwargs):
        """Deletes the specified auth server from NetMRI.

            **Inputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` True
            |  ``default:`` None

            :param id: The authentication server identifier.
            :type id: Integer

            **Outputs**

        """
        # Delegates to the broker's generic request mechanism; the "destroy"
        # method name is resolved against this broker's API namespace.
        return self.api_request(self._get_method_fullname("destroy"), kwargs)
| {
"content_hash": "0b2dbb63c2680827f25c21aa46ac3f74",
"timestamp": "",
"source": "github",
"line_count": 935,
"max_line_length": 615,
"avg_line_length": 53.971122994652404,
"alnum_prop": 0.5979826803796842,
"repo_name": "infobloxopen/infoblox-netmri",
"id": "585b6c4687fdd6c28794b61ec9d4f25d3b939bab",
"size": "50463",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "infoblox_netmri/api/broker/v2_8_0/auth_server_broker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2110"
},
{
"name": "Python",
"bytes": "20560"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestConvert:
    def test_convert_objects(self, float_string_frame):
        """A frame of floats plus an object column round-trips through
        a double transpose and soft conversion unchanged."""
        round_tripped = float_string_frame.T.T
        result = round_tripped._convert(datetime=True)
        tm.assert_frame_equal(result, float_string_frame)
        assert result["A"].dtype == np.float64

        # force numeric conversion
        float_string_frame["H"] = "1."
        float_string_frame["I"] = "1"

        # add in some items that will be nan
        n_rows = len(float_string_frame)
        float_string_frame["J"] = "1."
        float_string_frame["K"] = "1"
        float_string_frame.loc[float_string_frame.index[0:5], ["J", "K"]] = "garbled"

        result = float_string_frame._convert(datetime=True, numeric=True)
        for col, expected_dtype in [
            ("H", "float64"),
            ("I", "int64"),
            ("J", "float64"),
            ("K", "float64"),
        ]:
            assert result[col].dtype == expected_dtype
        for col in ("J", "K"):
            assert len(result[col].dropna()) == n_rows - 5

        # via astype
        result = float_string_frame.copy()
        result["H"] = result["H"].astype("float64")
        result["I"] = result["I"].astype("int64")
        assert result["H"].dtype == "float64"
        assert result["I"].dtype == "int64"

        # via astype, but errors
        result = float_string_frame.copy()
        with pytest.raises(ValueError, match="invalid literal"):
            result["H"].astype("int32")

    def test_convert_mixed_single_column(self):
        # GH#4119, not converting a mixed type (e.g.floats and object)
        # mixed in a single column
        frame = DataFrame({"s": Series([1, "na", 3, 4])})
        converted = frame._convert(datetime=True, numeric=True)
        expected = DataFrame({"s": Series([1, np.nan, 3, 4])})
        tm.assert_frame_equal(converted, expected)

    def test_convert_objects_no_conversion(self):
        # A frame with no datetime-like object columns is returned unchanged.
        before = DataFrame({"a": [1, 2, 3], "b": [4.0, 5, 6], "c": ["x", "y", "z"]})
        after = before._convert(datetime=True)
        tm.assert_frame_equal(before, after)
| {
"content_hash": "9a2ea25de8edfe7f63a11ae5d2c5c913",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 85,
"avg_line_length": 37.52542372881356,
"alnum_prop": 0.5925925925925926,
"repo_name": "pandas-dev/pandas",
"id": "118af9f532abeb96e67e761cd2c1a2e5b2c494e4",
"size": "2214",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pandas/tests/frame/methods/test_convert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import Tensile.Tensile as Tensile
def test_2sum_gsu_src(tmpdir):
 # Run the disabled multi-sum "2sum_gsu_src" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/2sum_gsu_src.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_2sum(tmpdir):
 # Run the disabled multi-sum "2sum" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/2sum.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_2sum_gsu(tmpdir):
 # Run the disabled multi-sum "2sum_gsu" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/2sum_gsu.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_3sum_gsu(tmpdir):
 # Run the disabled multi-sum "3sum_gsu" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/3sum_gsu.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_2sum_gsu_simple(tmpdir):
 # Run the disabled multi-sum "2sum_gsu_simple" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/2sum_gsu_simple.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_2sum_src(tmpdir):
 # Run the disabled multi-sum "2sum_src" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/2sum_src.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
def test_simple_sum2_scrambled(tmpdir):
 # Run the disabled multi-sum "simple_sum2_scrambled" config in a temp work dir.
 config = Tensile.TensileTestPath("disabled/multi_sum/simple_sum2_scrambled.yaml")
 Tensile.Tensile([config, tmpdir.strpath])
| {
"content_hash": "77ad56f4e43e57ac5f58effaa39ccc39",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 108,
"avg_line_length": 41,
"alnum_prop": 0.7815482502651113,
"repo_name": "ROCmSoftwarePlatform/Tensile",
"id": "2f08def0d6083ab441d004d5f0ed664f9a0c528e",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Tensile/Tests/disabled/multi_sum/test_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1179916"
},
{
"name": "Awk",
"bytes": "1667"
},
{
"name": "C++",
"bytes": "1570879"
},
{
"name": "CMake",
"bytes": "70754"
},
{
"name": "Dockerfile",
"bytes": "1413"
},
{
"name": "Groovy",
"bytes": "23999"
},
{
"name": "Makefile",
"bytes": "5336"
},
{
"name": "Python",
"bytes": "2699223"
},
{
"name": "Shell",
"bytes": "64197"
},
{
"name": "TeX",
"bytes": "83918"
}
],
"symlink_target": ""
} |
from typing import Any
from argparse import ArgumentParser
from zerver.models import all_realm_filters
from zerver.lib.actions import do_add_realm_filter, do_remove_realm_filter
from zerver.lib.management import ZulipBaseCommand
import sys
class Command(ZulipBaseCommand):
    help = """Create a link filter rule for the specified realm.
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
RegExp syntax. In addition to JS-compatible syntax, the following features are available:
 * Named groups will be converted to numbered groups automatically
 * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
Example: ./manage.py realm_filters --realm=zulip --op=add '#(?P<id>[0-9]{2,8})' \
    'https://support.example.com/ticket/%(id)s'
Example: ./manage.py realm_filters --realm=zulip --op=remove '#(?P<id>[0-9]{2,8})'
Example: ./manage.py realm_filters --realm=zulip --op=show
    """

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register the operation selector and the positional filter args."""
        parser.add_argument(
            '--op', dest='op', type=str, default="show",
            help='What operation to do (add, show, remove).')
        parser.add_argument(
            'pattern', metavar='<pattern>', type=str, nargs='?', default=None,
            help="regular expression to match")
        parser.add_argument(
            'url_format_string', metavar='<url pattern>', type=str, nargs='?',
            help="format string to substitute")
        self.add_realm_args(parser, True)

    def handle(self, *args: Any, **options: str) -> None:
        """Dispatch to show/add/remove; exits the process when done."""
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser

        op = options["op"]
        if op == "show":
            print("%s: %s" % (realm.string_id, all_realm_filters().get(realm.id, [])))
            sys.exit(0)

        # Every remaining operation needs a pattern.
        pattern = options['pattern']
        if not pattern:
            self.print_help("./manage.py", "realm_filters")
            sys.exit(1)

        if op == "add":
            url_format_string = options['url_format_string']
            if not url_format_string:
                self.print_help("./manage.py", "realm_filters")
                sys.exit(1)
            do_add_realm_filter(realm, pattern, url_format_string)
            sys.exit(0)

        if op == "remove":
            do_remove_realm_filter(realm, pattern=pattern)
            sys.exit(0)

        # Unknown operation.
        self.print_help("./manage.py", "realm_filters")
        sys.exit(1)
| {
"content_hash": "f0ac35d85c7f4bfba040e0f945cafa4e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 95,
"avg_line_length": 42.967213114754095,
"alnum_prop": 0.5963372758489126,
"repo_name": "brockwhittaker/zulip",
"id": "a814e1c78c9dd7e2c07fc294634790abe6c779ea",
"size": "2622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/realm_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "442662"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "515931"
},
{
"name": "JavaScript",
"bytes": "2195008"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "393671"
},
{
"name": "Puppet",
"bytes": "87413"
},
{
"name": "Python",
"bytes": "3948219"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "65702"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import json
from distutils.version import LooseVersion
def create_pages():
    """Create and publish the demo 'Home' page from ``starting_page.json``.

    Reads the page content from ``starting_page.json`` in the current working
    directory, creates a CMS page (preferring a 'feature.html' template when
    it is the second configured template), fills the feature and main
    placeholders, lays the main content out in 33%-wide columns, and publishes
    the page if at least one user exists.
    """
    from cms.models import Placeholder
    from cms.api import create_page, add_plugin, publish_page
    from django.conf import settings
    from django.contrib.auth.models import User
    from django.utils.translation import ugettext_lazy as _

    placeholder = {}

    with open('starting_page.json') as data_file:
        content = json.load(data_file)

    try:
        # try to get a feature template with fallback
        template = settings.CMS_TEMPLATES[1][0]
        if template != 'feature.html':
            template = settings.CMS_TEMPLATES[0][0]
    except IndexError:
        # Only one template is configured.
        template = settings.CMS_TEMPLATES[0][0]

    lang = settings.LANGUAGES[0][0]
    page = create_page(_('Home'), template, lang)
    placeholder['main'] = page.placeholders.get(slot='content')

    try:
        # try to get a feature placeholder
        placeholder_feature = page.placeholders.get(slot='feature')
        add_plugin(placeholder_feature, 'TextPlugin', lang,
                   body=content['feature'])
    except Placeholder.DoesNotExist:
        # fallback: this template has no feature placeholder, so put the
        # feature content into the main placeholder instead
        add_plugin(placeholder['main'], 'TextPlugin', lang, body=content['feature'])

    # Add main content to a MultiColumnPlugin
    multi_columns_plugin = add_plugin(placeholder['main'], 'MultiColumnPlugin', lang)
    for column_content in content['main']:
        col = add_plugin(placeholder['main'], 'ColumnPlugin', lang,
                         target=multi_columns_plugin, **{'width': '33%'})
        add_plugin(placeholder['main'], 'TextPlugin', lang, body=column_content,
                   target=col)

    # In order to publish the page there needs to be at least one user
    if User.objects.count() > 0:
        try:
            publish_page(page, User.objects.all()[0], lang)
        except TypeError:
            # supporting old cms versions (publish_page took no language arg)
            publish_page(page, User.objects.all()[0])
if __name__ == '__main__':
    import django

    # Django >= 1.7 requires django.setup() to populate the app registry
    # before the ORM models imported inside create_pages() can be used.
    if LooseVersion(django.get_version()) >= LooseVersion('1.7'):
        django.setup()
    create_pages()
| {
"content_hash": "bebc93413b7d6d708903701771b882af",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 85,
"avg_line_length": 35.693548387096776,
"alnum_prop": 0.6375960234975147,
"repo_name": "Glasgow2015/team-10",
"id": "1ec6075aa0ac1a72c7ce246e2ebbaddaac3b385f",
"size": "2237",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/djangocms_installer/share/starting_page.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "562501"
},
{
"name": "HTML",
"bytes": "458748"
},
{
"name": "JavaScript",
"bytes": "786940"
},
{
"name": "PHP",
"bytes": "5453"
},
{
"name": "Python",
"bytes": "12350526"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "4232"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
from chaco.shell.scaly_plot import ScalyPlot
from enable.component_editor import ComponentEditor
from pyface.workbench.api import TraitsUIEditor
from traits.api import Any, Enum, HasTraits, Property, Str
from traitsui import api as tui
class PlotUI(HasTraits):
    """ Simple Traits UI proxy for a Chaco plot.
    """

    # The plot.
    component = Any()

    # Default view: just the plot component in a resizable window,
    # rendered through an Enable ComponentEditor with no label.
    traits_view = tui.View(
        tui.Item('component', editor=ComponentEditor(), show_label=False),
        resizable=True,
    )
class PlotEditor(TraitsUIEditor):
    """ A Workbench Editor showing a Chaco plot for the shell interface.
    """

    # Background color for newly created plot containers.
    bgcolor = Str('white')

    # Default origin placement for image plots.
    image_default_origin = Enum("bottom left", "top left",
                                "bottom right", "top right")

    # The plot.  Note that both 'component' and 'container' are Properties
    # whose getters/setters below proxy to the same underlying object
    # (self.obj.component), so they are effectively aliases.
    component = Property(Any)
    container = Property(Any)

    # The PlotData.
    data = Any()

    # The PlotSession of which we are a part. We need to know this in order
    # to notify it of our being closed, etc.
    session = Any()

    def __init__(self, is_image=False, bgcolor="white",
                 image_default_origin="top left", *args, **kw):
        # NOTE(review): this calls super(TraitsUIEditor, ...), which skips
        # TraitsUIEditor.__init__ and goes straight to its base class.  The
        # same pattern appears in destroy_control() below; it looks
        # deliberate, but confirm before changing.  Also note *args is
        # accepted but not forwarded.
        super(TraitsUIEditor, self).__init__(**kw)

        # Some defaults which should be overridden by preferences.
        self.bgcolor = bgcolor
        self.image_default_origin = image_default_origin

        # Create an empty top-level container
        if is_image:
            top_container = self._create_top_img_container()
        else:
            top_container = self._create_top_container()

        # Wrap the container in the Traits UI proxy that this editor shows.
        self.obj = PlotUI(component=top_container)

    #### PlotWindow interface ##################################################

    def get_container(self):
        """Return the top-level plot container."""
        return self.obj.component

    def set_container(self, container):
        """Replace the top-level plot container."""
        self.obj.component = container

    def iconize(self, iconize):
        """Iconizes the window if *iconize* is True.

        Do nothing in this implementation.
        """

    def maximize(self, maximize):
        """ If *maximize* is True, maximizes the window size; restores if False.

        Do nothing in this implementation.
        """

    def set_size(self, width, height):
        # Sizing is managed by the workbench; nothing to do here.
        pass

    def set_title(self, title):
        # The editor's 'name' trait is what the workbench displays.
        self.name = title

    def raise_window(self):
        # Bring this editor to the front within its workbench window.
        self.window.activate_editor(self)

    #### Editor interface ######################################################

    def destroy_control(self):
        """ Destroy the toolkit-specific control that represents the part.
        """
        # Detach from the owning session before tearing down the control.
        self._on_window_close()
        # NOTE(review): skips TraitsUIEditor.destroy_control (see __init__).
        super(TraitsUIEditor, self).destroy_control()

    #### Private interface #####################################################

    def _get_container(self):
        # Property getter for 'container' (proxies to the PlotUI component).
        return self.obj.component

    def _set_container(self, value):
        # Property setter for 'container'.
        self.obj.component = value

    def _get_component(self):
        # Property getter for 'component' (same storage as 'container').
        return self.obj.component

    def _set_component(self, value):
        # Property setter for 'component'.
        self.obj.component = value

    def _create_top_container(self):
        """Build an empty, back-buffered ScalyPlot for regular plots."""
        plot = ScalyPlot(
            padding=50,
            fill_padding=True,
            bgcolor=self.bgcolor,
            use_backbuffer=True,
        )
        return plot

    def _create_top_img_container(self):
        """Build an empty ScalyPlot configured for image plots (sets the
        default origin)."""
        plot = ScalyPlot(
            padding=50,
            fill_padding=True,
            bgcolor=self.bgcolor,
            use_backbuffer=True,
            default_origin=self.image_default_origin,
        )
        return plot

    def _on_window_close(self):
        # Notify the owning PlotSession (if any) that this window is gone;
        # tolerate already having been removed from the session's list.
        if self.session:
            try:
                ndx = self.session.windows.index(self)
                self.session.del_window(ndx)
            except ValueError:
                pass
| {
"content_hash": "eff8b0c16aedac94419418a262fab3fe",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 26.717391304347824,
"alnum_prop": 0.5728234336859235,
"repo_name": "burnpanck/chaco",
"id": "be78bd5c35665805bffe9f3371bd87a39b27e7aa",
"size": "3687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chaco/plugin/plot_editor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "1761203"
}
],
"symlink_target": ""
} |
"""A utility script to automate the process of symbolizing a SyzyASan
minidump.
"""
from collections import namedtuple
import optparse
import os
import re
import subprocess
import sys
# The sentinel value that we use at the end of the command executed in the
# debugger.
_SENTINEL = 'ENDENDEND'
# The default values for the path to cdb.exe.
_DEFAULT_CDB_PATHS = [
r'c:\Program Files (x86)\Debugging Tools for Windows (x86)\cdb.exe',
r'c:\Program Files (x86)\Windows Kits\8.0\Debuggers\x86\cdb.exe',
]
# The frame containing the error info structure.
_BAD_ACCESS_INFO_FRAMES = [
'asan_rtl!agent::asan::AsanRuntime::OnError',
'syzyasan_rtl!agent::asan::AsanRuntime::ExceptionFilterImpl',
]
# The helper string that will be included at the beginning of the printed crash
# reports.
_ERROR_HELP_URL = 'You can go to \
https://code.google.com/p/syzygy/wiki/SyzyASanBug to get more information \
about how to treat this bug.'
# Command to print the error info structure.
_GET_BAD_ACCESS_INFO_COMMAND = 'dt -o error_info'
# Command to print the block info structure nested into the error info one.
_GET_BLOCK_INFO_COMMAND = 'dt agent::asan::AsanBlockInfo poi(error_info) -o'
# Template command to print a stack trace from an error info structure.
#
# Here's the description of the keyword to use in this template:
# - operand: The operator to use to access the structure ('.' or '->').
# - type: The stack trace type ('alloc' or 'free')
_GET_STACK_COMMAND_TEMPLATE = (
'dps @@(&error_info{operand}block_info.{type}_stack) '
'l@@(error_info{operand}block_info.{type}_stack_size);'
)
# Template command to print the stack trace of a corrupt block from an error
# info structure.
#
# Here's the description of the keyword to use in this template:
# - operand: The operator to use to access the structure ('.' or '->').
# - range_idx: The corrupt range index.
# - block_idx: The block index in its range.
# - type: The stack trace type ('alloc' or 'free')
_GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE = (
'dps @@(((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
'(error_info{operand}corrupt_ranges))[{range_idx}].block_info[{block_idx}].'
'{type}_stack) '
'L@@(((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
'(error_info{operand}corrupt_ranges))[{range_idx}].block_info[{block_idx}].'
'{type}_stack_size)'
)
# A named tuple that will contain an ASan crash report.
ASanReport = namedtuple('ASanReport',
'bad_access_info '
'crash_stack '
'crash_stack_hash '
'alloc_stack '
'alloc_stack_hash '
'free_stack '
'free_stack_hash '
'corrupt_heap_info '
'from_uef')
# Match a stack frame as printed by cdb.exe (or windbg.exe).
#
# Here's some examples of stack frames that this regex will match:
# - 003cd6b8 0ff3a36b 007bff00 00004e84 003cd760 foo!bar+0x18
# - 003cd6b8 0ff3a36b 007bff00 00004e84 003cd760 0xcafebabe
# - (Inline) -------- -------- -------- -------- foo!bar+0x42
#
# Here's a description of the different groups in this regex:
# - args: The arguments in front of the module name.
# - module: The module's name.
# - location: The location in the module.
# - address: If the module name is not available then we'll get its address.
_STACK_FRAME_RE = re.compile("""
^
(\(Inline\)\s)?
(?P<args>([0-9A-F\-]+\ +)+)
(?:
(?P<module>[^ ]+)(!(?P<location>.*))? |
(?P<address>0x[0-9a-f]+)
)
$
""", re.VERBOSE | re.IGNORECASE)
# Match a list of modules as printed by cdb.exe when running the 'lm n' command.
#
# Here's a description of the different groups in this regex:
# - start: Module's start address.
# - end: Module's end address.
# - module_name: Module's name.
# - image_name: Image's name.
_MODULE_MATCH_RE = re.compile("""
(?P<start>\w+)\s+
(?P<end>\w+)\s+
(?P<module_name>\w+)\s+
(?P<image_name>.*)
""", re.VERBOSE | re.IGNORECASE)
# Match a Chrome frame in a stack trace.
_CHROME_RE = re.compile('^(chrome[_0-9A-F]+)$', re.VERBOSE | re.IGNORECASE)
# Match a frame pointer in a stack frame as it is printed by a debugger.
_FRAME_POINTER_RE = re.compile(
'\s*[a-z0-9]+\s+(?P<address>[a-z0-9]+)\s+.*', re.VERBOSE | re.IGNORECASE)
# Match an enum value as it is printed by a debugger. They're usually
# represented as 'NUMERIC_VALUE ( LITERAL_VALUE )'.
_ENUM_VAL_RE = re.compile(
'\s*(?P<num_value>\d+)\s*\(\s*(?P<literal_value>[a-zA-Z0-9_]+)\s*\)',
re.VERBOSE | re.IGNORECASE)
def NormalizeChromeSymbol(symbol):
  """Collapse a versioned Chrome module name into the canonical 'chrome_dll'."""
  normalized = _CHROME_RE.sub('chrome_dll', symbol)
  return normalized
def NormalizeStackTrace(stack_trace):
  """Normalize a given stack trace.

  Args:
    stack_trace: The stack trace to normalize.

  Returns:
    The normalized stack trace and its hash.
  """
  frames = []
  frame_hash = 0
  for raw_line in stack_trace:
    match = _STACK_FRAME_RE.match(raw_line)
    if match is None:
      # Not a recognizable stack frame line; skip it.
      continue
    args = match.group('args')
    if args:
      # Fold the frame pointer (first hex value among the args) into the
      # hash so structurally identical traces hash alike.
      ptr_match = _FRAME_POINTER_RE.match(args)
      if ptr_match and ptr_match.group('address'):
        frame_hash ^= int(ptr_match.group('address'), 16)
    if match.group('address'):
      # Symbol-less frame: keep the raw address.
      frames.append(match.group('address'))
      continue
    module = NormalizeChromeSymbol(match.group('module'))
    location = match.group('location')
    location = NormalizeChromeSymbol(location) if location else 'unknown'
    frames.append('%s!%s' % (module, location))
  return (frames, frame_hash)
def DebugStructToDict(structure):
  """Converts a structure as printed by the debugger into a dictionary. The
  structure should have the following format:
      field1 : value1
      field2 : value2
      ...

  Lines with no ':' separator (or with nothing before it) are ignored.

  Args:
    structure: The structure to convert.

  Returns:
    A dict containing the values stored in the structure.
  """
  ret = dict()
  for entry in structure:
    separator_idx = entry.find(':')
    # Skip lines with no separator (find returns -1) or an empty field name
    # (separator at index 0).  The original check, 'not entry.find(...)',
    # only skipped a leading ':' and turned separator-less lines into
    # garbage entries (key = line minus its last char, value = whole line).
    if separator_idx <= 0:
      continue
    key = entry[:separator_idx].strip()
    value = entry[separator_idx + 1:].strip()
    ret[key] = value
  return ret
def GetCorruptHeapInfo(debugger, bad_access_info_vals, bad_access_info_frame,
                       from_uef):
  """Extract the information stored in the minidump about the heap corruption.

  For every corrupt range recorded in the error_info structure this walks the
  per-range block_info entries, collecting their fields, their corruption
  analysis, their alloc/free stack traces and a dump of their content.

  Args:
    debugger: A handle to a cdb debugging session.
    bad_access_info_vals: A dictionary containing the information about the
        invalid access.
    bad_access_info_frame: The number of the frame containing the error_info
        structure.
    from_uef: Indicates if the error has been caught by the unhandled
        exception filter.

  Returns:
    A list of corrupt ranges, each of them containing the information about
    the corrupt blocks in it.
  """
  # Reset the debugger context and jump to the frame containing the
  # information.
  corrupt_range_count = int(bad_access_info_vals['corrupt_range_count'], 16)
  debugger.Command('.cxr; .frame %X' % bad_access_info_frame)
  corrupt_ranges = []
  # Iterates over the corrupt ranges.
  for corrupt_range_idx in range(0, corrupt_range_count):
    corrupt_range_info = []
    # When using the '??' operator in a debugging session to evaluate a
    # structure the offsets gets printed, this regex allows their removal.
    struct_field_re = re.compile('\s+\+0x[0-9a-f]+\s*(.*)')
    # NOTE(review): presumably error_info is accessed as a value when the
    # error was caught by the unhandled exception filter and as a pointer
    # otherwise, hence '.' vs '->' — confirm against the runtime structure.
    operand = '.' if from_uef else '->'
    # Get the information about this corrupt range.
    for line in debugger.Command(
        '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
        '(error_info%scorrupt_ranges))[0x%x]' % (operand, corrupt_range_idx)):
      m = struct_field_re.match(line)
      if m:
        corrupt_range_info.append(m.group(1))
    corrupt_range_info_vals = DebugStructToDict(corrupt_range_info)
    block_info_count = int(corrupt_range_info_vals['block_info_count'])
    corrupt_range_info_vals['block_info'] = []
    # Iterates over the block info structure available for this range.
    for block_info_idx in range(0, block_info_count):
      # Retrieves the information about the current block info structure.
      block_info = []
      for line in debugger.Command(
          '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
          '(error_info%scorrupt_ranges))[%d].block_info[%d]' % (
              operand, corrupt_range_idx, block_info_idx)):
        m = struct_field_re.match(line)
        if m:
          block_info.append(m.group(1))
      # The 'analysis' sub-structure is dumped separately so its fields can
      # be flattened into the block info dictionary below.
      block_info_corruption_state = []
      for line in debugger.Command(
          '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
          '(error_info%scorrupt_ranges))[%d].block_info[%d].analysis' % (
              operand, corrupt_range_idx, block_info_idx)):
        m = struct_field_re.match(line)
        if m:
          block_info_corruption_state.append(m.group(1))
      block_info_vals = DebugStructToDict(block_info)
      block_info_corruption_state_vals = DebugStructToDict(
          block_info_corruption_state)
      # Replace the raw 'analysis' entry with flattened 'analysis.<field>'
      # keys taken from the dedicated dump above.
      block_info_vals.pop('analysis', None)
      for e in block_info_corruption_state_vals:
        block_info_vals['analysis.%s' % e] = block_info_corruption_state_vals[e]
      # Get the allocation stack trace for this block info structure.
      block_info_vals['alloc_stack'], _ = NormalizeStackTrace(debugger.Command(
          _GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE.format(type='alloc',
              operand=operand, range_idx=corrupt_range_idx,
              block_idx=block_info_idx)))
      # Get the free stack trace for this block info structure.
      block_info_vals['free_stack'], _ = NormalizeStackTrace(debugger.Command(
          _GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE.format(type='free',
              operand=operand, range_idx=corrupt_range_idx,
              block_idx=block_info_idx)))
      # Get the block content. The first word of the 'header' field is the
      # block address; dump 0x80 bytes starting 0x10 past it.
      block_address = block_info_vals['header'].split(' ')[0]
      block_info_vals['block_content'] = []
      block_content = debugger.Command('db %s+0x10 L0x80' % block_address)
      # Match a block data line as printed by Windbg. This helps to get rid of
      # the extra characters that we sometime see at the beginning of the
      # lines ('0:000>').
      line_cleanup_re = re.compile('^\d\:\d+>\s*(.*)')
      for line in block_content:
        m = line_cleanup_re.match(line)
        if m:
          line = m.group(1)
        block_info_vals['block_content'].append(line)
      corrupt_range_info_vals['block_info'].append(block_info_vals)
    # Append the information about the current range to the list of corrupt
    # ranges.
    corrupt_ranges.append(corrupt_range_info_vals)
  return corrupt_ranges
class ScopedDebugger(subprocess.Popen):
  """A scoped debugger instance.

  Wraps a cdb.exe process opened on a minidump. stdin/stdout are piped so
  that commands can be sent and their output read back synchronously via
  Command(). Intended to be used as a context manager so the debugger
  process is always terminated.
  """

  def __init__(self, debugger_path, minidump_filename):
    """Initialize the debugger instance.

    Args:
      debugger_path: The debugger's path.
      minidump_filename: The minidump filename (passed via cdb's '-z' flag).
    """
    super(ScopedDebugger, self).__init__([debugger_path,
                                          '-z', minidump_filename],
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)

  def __enter__(self):
    """This debugger should be instantiated via a 'with' statement to ensure
    that its resources are correctly closed.
    """
    return self

  def __exit__(self, e_type, value, traceback):
    """Terminate the debugger process. This is executed when the instance of
    this debugger is created with a 'with' statement.
    """
    self.StopDebugger()

  def StopDebugger(self):
    """Terminate the debugger process. We could send the terminate command
    ('q') to the debugger directly but at this point the debugger might be
    stuck because of a previous command and it's just faster to kill the
    process anyway.
    """
    self.terminate()

  def Command(self, command):
    """Execute a command in the debugger instance.

    Args:
      command: The command to execute.

    Returns:
      The output of the debugger after running this command, as a list of
      lines (the sentinel line itself is not included).
    """
    # Echo the sentinel after the command so we can tell where this
    # command's output ends.
    self.stdin.write(command + '; .echo %s\n' % _SENTINEL)
    lines = []
    while True:
      line = self.stdout.readline().rstrip()
      # Sometimes the sentinel value is preceded by something like '0:000> '.
      if line.endswith(_SENTINEL):
        break
      lines.append(line)
    return lines

  def LoadSymbols(self, pdb_path):
    """Loads the pdbs for the loaded modules if they are present in |pdb_path|.

    Args:
      pdb_path: The path containing the pdbs.
    """
    pdbs = [f for f in os.listdir(pdb_path) if f.endswith('.pdb')]
    # The path needs to be quoted to avoid including the sentinel value in
    # cdb's symbol search path.
    self.Command('.sympath \"%s\"' % pdb_path)
    # Reload symbols only for modules whose pdb is actually present.
    for line in self.Command('lm n'):
      m = _MODULE_MATCH_RE.match(line)
      if m is None:
        continue
      image_name = m.group('image_name')
      if image_name is None:
        continue
      pdb_name = image_name + '.pdb'
      if pdb_name in pdbs:
        self.Command('.reload /fi %s' % image_name)
    self.Command('.symfix')
def ProcessMinidump(minidump_filename, cdb_path, pdb_path):
  """Process a minidump.

  This analyzes the error contained in the minidump and returns the crash
  report for it.

  Args:
    minidump_filename: The minidump filename.
    cdb_path: The path to cdb.exe.
    pdb_path: (Optional) The path to the pdbs for the loaded modules.

  Returns:
    The crash report to be printed, or None if the minidump doesn't exist or
    the frame containing the bad access information couldn't be found.
  """
  if not os.path.exists(minidump_filename):
    return
  with ScopedDebugger(cdb_path, minidump_filename) as debugger:
    if pdb_path is not None:
      debugger.LoadSymbols(pdb_path)
    # Enable the line number information.
    debugger.Command('.lines')
    # Get the SyzyASan crash stack and try to find the frame containing the
    # bad access info structure.
    asan_crash_stack = debugger.Command('kv')
    bad_access_info_frame = 0
    crash_lines, _ = NormalizeStackTrace(asan_crash_stack)
    # Indicates if this bug has been caught by the unhandled exception filter.
    from_uef = False
    for line in crash_lines:
      if not any(line.find(b) != -1 for b in _BAD_ACCESS_INFO_FRAMES):
        bad_access_info_frame += 1
      else:
        if line.find('ExceptionFilter') != -1:
          from_uef = True
        break
    else:
      # BUG FIX: no frame matched _BAD_ACCESS_INFO_FRAMES. The previous code
      # compared the counter against -1 after the loop, which could never be
      # true since the counter starts at 0 and is only incremented; it also
      # formatted the filename with '%d' and was missing a space in the
      # concatenated message.
      print ('Unable to find the frame containing the invalid access '
             'information for %s.' % minidump_filename)
      return
    # Get the information about this bad access.
    debugger.Command('.frame %X' % bad_access_info_frame)
    debugger.Command('kv')
    bad_access_info = debugger.Command(_GET_BAD_ACCESS_INFO_COMMAND)
    bad_access_block_info = debugger.Command(_GET_BLOCK_INFO_COMMAND)
    # The first two lines contain no useful information, remove them.
    bad_access_info.pop(0)
    bad_access_info.pop(0)
    bad_access_block_info.pop(0)
    bad_access_block_info.pop(0)
    bad_access_info_vals = DebugStructToDict(bad_access_info)
    bad_access_info_vals.update(DebugStructToDict(bad_access_block_info))
    # Checks if the heap is corrupt.
    heap_is_corrupt = bad_access_info_vals['heap_is_corrupt'] == '1'
    # Cleans the enum values stored in the dictionary: keep only the literal
    # part of 'NUMERIC ( LITERAL )' values.
    for key in bad_access_info_vals:
      m = _ENUM_VAL_RE.match(bad_access_info_vals[key])
      if m:
        bad_access_info_vals[key] = m.group('literal_value')
    debugger.Command('.ecxr')
    crash_stack, crash_stack_hash = NormalizeStackTrace(
        debugger.Command('kv'))
    # If the heap is not corrupt and the error type indicates an invalid or
    # wild address then there's no useful information that we can report.
    if not heap_is_corrupt and (
        bad_access_info_vals['error_type'] == 'INVALID_ADDRESS' or
        bad_access_info_vals['error_type'] == 'WILD_ACCESS'):
      report = ASanReport(bad_access_info=bad_access_info_vals,
                          crash_stack=crash_stack,
                          crash_stack_hash=crash_stack_hash,
                          alloc_stack=None,
                          alloc_stack_hash=None,
                          free_stack=None,
                          free_stack_hash=None,
                          corrupt_heap_info=None,
                          from_uef=None)
      return report

    def GetStackAndStackHashFromErrorInfoStruct(debugger, stack_type, is_ptr):
      """Reads the alloc/free stack stored in the error_info structure."""
      assert stack_type in ['alloc', 'free']
      command = _GET_STACK_COMMAND_TEMPLATE.format(
          type=stack_type, operand='->' if is_ptr else '.')
      return NormalizeStackTrace(debugger.Command(command))

    debugger.Command('.cxr; .frame %X' % bad_access_info_frame)
    alloc_stack, alloc_stack_hash = GetStackAndStackHashFromErrorInfoStruct(
        debugger, 'alloc', is_ptr=not from_uef)
    free_stack, free_stack_hash = GetStackAndStackHashFromErrorInfoStruct(
        debugger, 'free', is_ptr=not from_uef)
    corrupt_heap_info = None
    if heap_is_corrupt:
      corrupt_heap_info = GetCorruptHeapInfo(debugger,
                                             bad_access_info_vals,
                                             bad_access_info_frame, from_uef)
    report = ASanReport(bad_access_info=bad_access_info_vals,
                        crash_stack=crash_stack,
                        crash_stack_hash=crash_stack_hash,
                        alloc_stack=alloc_stack,
                        alloc_stack_hash=alloc_stack_hash,
                        free_stack=free_stack,
                        free_stack_hash=free_stack_hash,
                        corrupt_heap_info=corrupt_heap_info,
                        from_uef=from_uef)
    return report
def PrintASanReport(report, file_handle=sys.stdout):
  """Print a crash report.

  Args:
    report: The report to print.
    file_handle: A handle to the out stream, by default we print the report
        to stdout.
  """
  write = file_handle.write
  write('Bad access information:\n')
  for key in report.bad_access_info:
    write('  %s: %s\n' % (key, report.bad_access_info[key]))
  write('\nCrash stack:\n')
  if report.crash_stack:
    for frame in report.crash_stack:
      write('%s\n' % frame)
  if report.alloc_stack:
    write('\nAllocation stack:\n')
    for frame in report.alloc_stack:
      write('%s\n' % frame)
  if report.free_stack:
    write('\nFree stack:\n')
    for frame in report.free_stack:
      write('%s\n' % frame)
  if report.corrupt_heap_info:
    write('\n\nHeap is corrupt, here\'s some information about the '
          'corrupt ranges.\n\n')
    for range_idx, heap_range in enumerate(report.corrupt_heap_info):
      write('Corrupt range #%d\n' % range_idx)
      write('  Address : %s\n' % heap_range['address'])
      write('  Length : %s\n' % heap_range['length'])
      write('  Block count : %s\n' % heap_range['block_count'])
      write('  Block info count : %s\n' % heap_range['block_info_count'])
      write('  Block infos:\n')
      for info_idx, block_info in enumerate(heap_range['block_info']):
        write('    Block info #%d\n' % info_idx)
        # Stacks and block content get dedicated sections below.
        for field in sorted(block_info):
          if not field.endswith('stack') and field != 'block_content':
            write('      %s : %s\n' % (field, block_info[field]))
        write('      Alloc stack:\n')
        for frame in block_info['alloc_stack']:
          write('        %s\n' % frame)
        if block_info['free_stack']:
          write('      Free stack:\n')
          for frame in block_info['free_stack']:
            write('        %s\n' % frame)
        write('      Block content:\n')
        for line in block_info['block_content']:
          write('        %s\n' % line)
  write('\n\n%s\n' % _ERROR_HELP_URL)
_USAGE = """\
%prog [options] <minidumps>
Symbolizes a list of minidumps that has been generated by SyzyASan. For each of
them this prints the crash, alloc and free stack traces and gives more
information about the crash.
"""
def _ParseArguments():
  """Parse the command line arguments.

  Returns:
    The options on the command line and the list of minidumps to process.
  """
  option_parser = optparse.OptionParser(usage=_USAGE)
  option_parser.add_option('--cdb-path',
                           help='(Optional) The path to cdb.exe.')
  option_parser.add_option('--pdb-path',
                           help='(Optional) The path to the folder containing the'
                           ' PDBs.')
  options, minidumps = option_parser.parse_args()
  if not options.cdb_path:
    # Fall back to the first default cdb.exe location that exists on disk.
    options.cdb_path = next(
        (candidate for candidate in _DEFAULT_CDB_PATHS
         if os.path.isfile(candidate)),
        None)
  if not options.cdb_path:
    option_parser.error('Unable to find cdb.exe.')
  return options, minidumps
def main():
  """Parse arguments and do the symbolization."""
  opts, minidumps = _ParseArguments()
  # Process each minidump independently; one failure doesn't stop the rest.
  for minidump in minidumps:
    report = ProcessMinidump(minidump, opts.cdb_path, opts.pdb_path)
    # NOTE: Python 2 print statements — this script predates Python 3.
    if report:
      print 'Report for %s' % minidump
      PrintASanReport(report)
      print '\n'
    else:
      print 'Error while processing %s' % minidump
  return 0


if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "3639f914c12171e9862881cf06df8744",
"timestamp": "",
"source": "github",
"line_count": 622,
"max_line_length": 80,
"avg_line_length": 35.27009646302251,
"alnum_prop": 0.6287719938007111,
"repo_name": "wangming28/syzygy",
"id": "b2bf6778de0a9a39ce07b8a89017874e63a0bbd6",
"size": "22543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syzygy/scripts/asan/minidump_symbolizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "26942"
},
{
"name": "Batchfile",
"bytes": "21191"
},
{
"name": "C",
"bytes": "12371"
},
{
"name": "C++",
"bytes": "8336297"
},
{
"name": "CSS",
"bytes": "1333"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Protocol Buffer",
"bytes": "9292"
},
{
"name": "Python",
"bytes": "888571"
}
],
"symlink_target": ""
} |
# One import per line per PEP 8 (was 'import sys, os').
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))

# Sphinx naming conflict with "setup": the project's setup module is imported
# under a different name so it doesn't shadow Sphinx's setup() hook.
import setup as testmill
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): 'autocmd' is presumably a project-local extension found via the
# sys.path entries added above — confirm it ships with the docs.
extensions = ['autocmd', 'sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Ravello TestMill'
copyright = u'2013, Ravello Systems'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version, taken from the project's own setup module so the
# docs never drift from the released version.
version = testmill.version_info['version']
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. 'pyramid' is one of Sphinx's builtin themes.
html_theme = 'pyramid'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'RavelloTestMilldoc'
# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'RavelloTestMill.tex', u'Ravello TestMill Documentation',
   u'Geert Jansen', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ravellotestmill', u'Ravello TestMill Documentation',
     [u'Geert Jansen'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the Sphinx template
# placeholder ('One line description of project.') — consider replacing it.
texinfo_documents = [
  ('index', 'RavelloTestMill', u'Ravello TestMill Documentation',
   u'Geert Jansen', 'RavelloTestMill', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "c0b12179801218a21730f46cbf59bd26",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 80,
"avg_line_length": 32.39484978540773,
"alnum_prop": 0.7053524112347642,
"repo_name": "ravello/testmill",
"id": "85f399cf232308ecb3beac6c6f3f0bb69425f8b2",
"size": "7975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "667"
},
{
"name": "Java",
"bytes": "1225"
},
{
"name": "Python",
"bytes": "266824"
},
{
"name": "Shell",
"bytes": "4770"
}
],
"symlink_target": ""
} |
"""Utilities for admins."""
import csv
import ipaddress
import json
import logging
import random
from LatLon23 import string2latlon
from auvsi_suas.proto import interop_admin_api_pb2
from auvsi_suas.views.decorators import require_superuser
from auvsi_suas.views.json import ProtoJsonEncoder
from django import shortcuts
from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.generic import View
from google.protobuf import json_format
logger = logging.getLogger(__name__)

# Format passed to LatLon23.string2latlon — assumes pattern letters H (hemi-
# sphere), d (degrees), m (minutes), S (seconds) with '-' separators, i.e.
# inputs like 'N38-08-46.57'. TODO confirm against LatLon23 docs.
LATLON_FORMAT = 'H%d%-%m%-%S'

# Address/port of the interop server on the competition network.
INTEROP_SERVER_IP = '10.10.130.10'
INTEROP_SERVER_PORT = 80
# Bounds of the static IP range handed out to teams by BulkCreateTeams.
INTEROP_TEAM_STATIC_RANGE_MIN = '10.10.130.20'
INTEROP_TEAM_STATIC_RANGE_MAX = '10.10.130.119'
class GpsConversion(View):
    """Converts GPS from string to decimal."""

    @method_decorator(require_superuser)
    def dispatch(self, *args, **kwargs):
        """Restricts access to superusers before dispatching the request."""
        return super(GpsConversion, self).dispatch(*args, **kwargs)

    def post(self, request):
        """Converts the latitude/longitude strings in the request body.

        Returns a JSON GpsConversionResponse with decimal degrees, or a 400
        response when the request can't be parsed or converted.
        """
        conversion_request = interop_admin_api_pb2.GpsConversionRequest()
        try:
            json_format.Parse(request.body, conversion_request)
        except Exception as e:
            return HttpResponseBadRequest(
                'Failed to parse request. Error: %s' % str(e))

        has_lat = conversion_request.HasField('latitude')
        has_lon = conversion_request.HasField('longitude')
        if not has_lat or not has_lon:
            return HttpResponseBadRequest('Request missing fields.')

        try:
            converted = string2latlon(conversion_request.latitude,
                                      conversion_request.longitude,
                                      LATLON_FORMAT)
        except Exception as e:
            return HttpResponseBadRequest('Failed to convert GPS. Error: %s' %
                                          str(e))

        response = interop_admin_api_pb2.GpsConversionResponse()
        response.latitude = converted.lat.decimal_degree
        response.longitude = converted.lon.decimal_degree
        return HttpResponse(json_format.MessageToJson(response),
                            content_type="application/json")
class BulkCreateTeams(View):
    """Creates teams based on CSV file and renders a printable webpage.

    The CSV must provide 'University', 'Name' and 'Username' columns. Each
    row gets a user account with a random numeric password and a static IP
    allocated round-robin from the team static range.
    """

    @method_decorator(require_superuser)
    def dispatch(self, *args, **kwargs):
        """Restricts access to superusers before dispatching the request."""
        return super(BulkCreateTeams, self).dispatch(*args, **kwargs)

    def post(self, request):
        """Creates the accounts and renders the printable credentials page.

        Args:
            request: Request carrying the CSV upload under FILES['file'].

        Returns:
            The rendered 'bulk_create_teams.html' page with the server info
            and the generated per-team credentials.
        """
        # Generate the base context for credentials.
        context = {
            'server': {
                'ip': INTEROP_SERVER_IP,
                'port': INTEROP_SERVER_PORT,
            },
            'teams': [],
        }
        # Compute the numeric form of IP address.
        # Compute the range for wrap-around.
        static_min = int(
            ipaddress.IPv4Address(str(INTEROP_TEAM_STATIC_RANGE_MIN)))
        static_max = int(
            ipaddress.IPv4Address(str(INTEROP_TEAM_STATIC_RANGE_MAX)))
        # NOTE(review): with 'i % static_range' below the max address itself
        # is never assigned; use 'static_range + 1' if the range is meant to
        # be inclusive — confirm intent before changing allocations.
        static_range = static_max - static_min
        # Load the CSV input, generate team credentials.
        random.seed()
        csvreader = csv.DictReader(
            request.FILES['file'].read().decode().splitlines())
        for i, row in enumerate(csvreader):
            context['teams'].append({
                'university': row['University'],
                'name': row['Name'],
                'username': row['Username'],
                # BUG FIX: randint(1e9, 1e10) passed floats, which modern
                # Python's randint rejects; use explicit integer bounds.
                # NOTE(review): random is not a CSPRNG — consider the
                # 'secrets' module for credential generation.
                'password': random.randint(10 ** 9, 10 ** 10),
                'ip': str(
                    ipaddress.IPv4Address(static_min + (i % static_range))),
            })
        # Insert the user accounts.
        # Trim fields due to database constraints.
        for team in context['teams']:
            get_user_model().objects.create_user(
                username=team['username'],
                password=team['password'],
                first_name=team['name'][:30],
                last_name=team['university'][:30])
        # Render a printable page.
        return shortcuts.render(request, 'bulk_create_teams.html', context)
| {
"content_hash": "94514ca8564a8940233a3884d85cdd33",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 35.55652173913043,
"alnum_prop": 0.6089508437270726,
"repo_name": "auvsi-suas/interop",
"id": "e7ea08321aa04002201195fb72363736090c5e2c",
"size": "4089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/auvsi_suas/views/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1209"
},
{
"name": "Dockerfile",
"bytes": "2748"
},
{
"name": "HTML",
"bytes": "10321"
},
{
"name": "JavaScript",
"bytes": "20184"
},
{
"name": "Python",
"bytes": "445755"
},
{
"name": "Shell",
"bytes": "7377"
}
],
"symlink_target": ""
} |
class NoSyncDbRouter(object):
    """Base database router that pins a set of app labels to one database
    alias and schema, and opts those apps out of syncdb.

    Subclasses override `using`, `schema` and `app_labels`.
    """

    using = None
    schema = None
    app_labels = []

    def _handles(self, model):
        """Returns True when the model's app label is one of ours."""
        return model._meta.app_label in self.app_labels

    def db_for_read(self, model, **hints):
        """Database alias for reads of handled models, else None."""
        return self.using if self._handles(model) else None

    def db_for_write(self, model, **hints):
        """Database alias for writes of handled models, else None."""
        return self.using if self._handles(model) else None

    def schema_for_db(self, model, database, **hints):
        """Schema name for handled models, else None."""
        return self.schema if self._handles(model) else None

    def allow_syncdb(self, db, model):
        """No opinion on syncdb (always None)."""
        return None
# Concrete routers: each one maps a single Django app to the database schema
# of the same name via the NoSyncDbRouter base behavior.


class VariantsRouter(NoSyncDbRouter):
    schema = 'variants'
    app_labels = ['variants']


class SamplesRouter(NoSyncDbRouter):
    schema = 'samples'
    app_labels = ['samples']


class DiseasesRouter(NoSyncDbRouter):
    schema = 'diseases'
    app_labels = ['diseases']


class GenesRouter(NoSyncDbRouter):
    schema = 'genes'
    app_labels = ['genes']


class LiteratureRouter(NoSyncDbRouter):
    schema = 'literature'
    app_labels = ['literature']


class SourcesRouter(NoSyncDbRouter):
    schema = 'sources'
    app_labels = ['sources']


class GenomeRouter(NoSyncDbRouter):
    schema = 'genome'
    app_labels = ['genome']
| {
"content_hash": "26d5435107947435083a4c88004eb706",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 54,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.6394557823129252,
"repo_name": "chop-dbhi/varify",
"id": "0398a6378be4160963ce729df66846836e79d42a",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "varify/conf/routers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "80972"
},
{
"name": "JavaScript",
"bytes": "2399168"
},
{
"name": "Puppet",
"bytes": "14585"
},
{
"name": "Python",
"bytes": "210110"
},
{
"name": "Ruby",
"bytes": "1186"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
import copy
import pprint
import ipaddr
import netaddr
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
from cloudferrylib.base import exception
from cloudferrylib.base import network
from cloudferrylib.os.identity import keystone as ksresource
from cloudferrylib.utils import cache
from cloudferrylib.utils import log
from cloudferrylib.utils import utils as utl
LOG = log.getLogger(__name__)

# Name Neutron gives the per-tenant default security group.
DEFAULT_SECGR = 'default'
@cache.Cached(getter='get_subnets_list', modifier='create_network')
@cache.Cached(getter='get_networks_list', modifier='create_network')
@cache.Cached(getter='get_ports_list', modifier='create_port')
class NeutronNetwork(network.Network):
"""
The main class for working with OpenStack Neutron client
"""
    def __init__(self, config, cloud):
        """Initializes the neutron network resource handler.

        Args:
            config: Cloud configuration object.
            cloud: The owning cloud; provides the identity resource and the
                'neutron' MySQL connector.
        """
        super(NeutronNetwork, self).__init__(config)
        self.cloud = cloud
        self.identity_client = cloud.resources[utl.IDENTITY_RESOURCE]
        # Tenant ID used to scope listings; set by read_info() when a tenant
        # filter is requested.
        self.filter_tenant_id = None
        # Mapping read from the ext_net_map YAML file (empty dict when the
        # file has no content); presumably maps SRC external network IDs to
        # DST ones — confirm against the migration config.
        self.ext_net_map = \
            utl.read_yaml_file(self.config.migrate.ext_net_map) or {}
        self.mysql_connector = cloud.mysql_connector('neutron')
    @property
    def neutron_client(self):
        """A freshly-built neutron client wrapped by self.proxy."""
        return self.proxy(self.get_client(), self.config)
    def get_client(self):
        """Builds a raw neutron client from the cloud credentials.

        Returns:
            A neutronclient v2.0 Client authenticated with the credentials
            from self.config.cloud.
        """
        return neutron_client.Client(
            username=self.config.cloud.user,
            password=self.config.cloud.password,
            tenant_name=self.config.cloud.tenant,
            auth_url=self.config.cloud.auth_url,
            cacert=self.config.cloud.cacert,
            insecure=self.config.cloud.insecure,
            region_name=self.config.cloud.region
        )
    def read_info(self, **kwargs):
        """Collects info about neutron resources.

        Args:
            **kwargs: May contain 'tenant_id' — a list whose first element
                restricts collection to that tenant and is also stored as
                self.filter_tenant_id for the raw listings below.

        Returns:
            Dictionary with all necessary neutron info: networks, subnets,
            routers, detached ports, floating IPs, security groups, quotas,
            the set of used segmentation IDs and, when keep_lbaas is set,
            the LBaaS pools/monitors/members/vips.
        """
        if kwargs.get('tenant_id'):
            tenant_id = self.filter_tenant_id = kwargs['tenant_id'][0]
        else:
            tenant_id = ''
        nets = self.get_networks(tenant_id)
        subnets = self.get_subnets(tenant_id)
        detached_ports = self.get_detached_ports(tenant_id)
        LOG.debug('List of detached ports: %s',
                  repr([p['id'] for p in detached_ports]))
        if self.filter_tenant_id is not None:
            # Shared networks (and their subnets) are visible to the filtered
            # tenant even though another tenant owns them, so pull them in too.
            shared_nets = self.get_shared_networks_raw()
            for net in shared_nets:
                # do not include the same network twice
                if net['id'] in [n['id'] for n in nets]:
                    continue
                nets.append(self.convert_networks(net, self.cloud))
                LOG.debug("Got shared network ID %s", net['id'])
                # Append subnets from the shared networks
                for subnet in net['subnets']:
                    # do not include the same subnets twice
                    if subnet['id'] in [sn['id'] for sn in subnets]:
                        continue
                    subnets.append(self.convert_subnets(subnet, self.cloud))
                    LOG.debug("Got shared subnet ID %s", subnet['id'])
            full_nets_list = self.get_networks()
        else:
            full_nets_list = nets
        # Get the full list of busy segmentation IDs.
        used_seg_ids = get_segmentation_ids_from_net_list(full_nets_list)
        routers = self.get_routers()
        # Drop router->subnet references that point outside the collected
        # subnet set (e.g. subnets of other tenants).
        subnet_ids = {sn['id'] for sn in subnets}
        for router in routers:
            router['subnet_ids'] = [sn_id for sn_id in router['subnet_ids']
                                    if sn_id in subnet_ids]
        info = {'networks': nets,
                'subnets': subnets,
                'routers': routers,
                'detached_ports': detached_ports,
                'floating_ips': self.get_floatingips(tenant_id),
                'security_groups': self.get_sec_gr_and_rules(tenant_id),
                'quota': self.get_quota(tenant_id),
                'meta': {
                    'segmentation_ids': used_seg_ids
                }}
        if self.config.migrate.keep_lbaas:
            info['lbaas'] = dict()
            info['lb_pools'] = self.get_lb_pools(tenant_id)
            info['lb_monitors'] = self.get_lb_monitors(tenant_id)
            info['lb_members'] = self.get_lb_members(tenant_id)
            info['lb_vips'] = self.get_lb_vips(tenant_id)
        return info
    def show_quota(self, tenant_id=''):
        """Returns the neutron quota for the given tenant ID."""
        return self.neutron_client.show_quota(tenant_id)
    def list_quotas(self):
        """Returns the list of per-tenant quota dicts known to neutron."""
        return self.neutron_client.list_quotas()['quotas']
def get_quota(self, tenant_id):
# return structure {'name_tenant': {'subnet': 10, ...}, ...}
tenants = {}
if not tenant_id:
tenants_obj = self.identity_client.get_tenants_list()
tenants = {t.id: t.name for t in tenants_obj}
else:
tenants[tenant_id] = self.identity_client.\
try_get_tenant_name_by_id(tenant_id)
data = {
}
if self.config.network.get_all_quota:
for t_id, t_val in tenants.iteritems():
data[t_val] = self.neutron_client.show_quota(t_id)
else:
for t in self.neutron_client.list_quotas()['quotas']:
if (not tenant_id) or (tenant_id == t['tenant_id']):
tenant_name = self.identity_client.\
try_get_tenant_name_by_id(t['tenant_id'])
data[tenant_name] = {k: v
for k, v in t.iteritems()
if k != 'tenant_id'}
return data
def upload_quota(self, quota):
identity = self.identity_client
for q_name, q_val in quota.iteritems():
tenant_id = identity.get_tenant_id_by_name(q_name)
self.neutron_client.update_quota(tenant_id, q_val)
def create_quota(self, tenant_id, quota):
return self.neutron_client.update_quota(tenant_id, quota)
def required_tenants(self, filter_tenant_id=None):
old_filter_tanant_id = self.filter_tenant_id
self.filter_tenant_id = filter_tenant_id
tenant_ids = set()
for shared_net in self.get_shared_networks_raw():
tenant_ids.add(shared_net['tenant_id'])
for router in self.get_routers_raw():
tenant_ids.add(router['tenant_id'])
self.filter_tenant_id = old_filter_tanant_id
return list(tenant_ids)
    def deploy(self, info):
        """
        Deploy network resources to DST.

        Non-trivial behavior when both keep_floatingip and
        change_router_ips are enabled. Example:
        Initial state:
            src cloud with router external ip 123.0.0.5
            and FloatingIP 123.0.0.4
        Migrate resources:
            1. Move FloatingIP to dst. On dst we have FloatingIP 123.0.0.4
            2. Create FloatingIP on dst as a stub for the router IP.
               On dst we have two FloatingIPs [123.0.0.4, 123.0.0.5].
               IP 123.0.0.5 exists only in the OpenStack DB and does not
               crush the src network.
            3. Create router on dst. (here is the main idea) As you see
               above, ips 123.0.0.4 and 123.0.0.5 are already allocated,
               so OpenStack must allocate another ip for the router
               (e.g. 123.0.0.6).
            4. FloatingIP 123.0.0.5 is not needed anymore.
               It was used in step 2 only to stop OpenStack from creating
               the router with this ip. It is released if
               clean_router_ips_stub is enabled in the config.
        After resource migration we have:
            src router external ip 123.0.0.5 and FloatingIP 123.0.0.4
            dst router external ip 123.0.0.6 and FloatingIP 123.0.0.4
        """
        deploy_info = info
        self.upload_quota(deploy_info['quota'])
        self.upload_networks(deploy_info['networks'],
                             deploy_info['meta']['segmentation_ids'],
                             deploy_info['detached_ports'])
        dst_router_ip_ids = None
        if self.config.migrate.keep_floatingip:
            self.upload_floatingips(deploy_info['networks'],
                                    deploy_info['floating_ips'])
            if self.config.migrate.change_router_ips:
                subnets_map = {subnet['id']: subnet
                               for subnet in deploy_info['subnets']}
                # stub floating IPs that reserve the SRC router gateway
                # addresses (see step 2 in the docstring)
                router_ips = self.extract_router_ips_as_floating_ips(
                    subnets_map, deploy_info['routers'])
                dst_router_ip_ids = self.upload_floatingips(
                    deploy_info['networks'], router_ips)
        self.upload_routers(deploy_info['networks'],
                            deploy_info['subnets'],
                            deploy_info['routers'])
        # release the stub floating IPs once the routers exist (step 4)
        if self.config.migrate.clean_router_ips_stub and dst_router_ip_ids:
            for router_ip_stub in dst_router_ip_ids:
                self.neutron_client.delete_floatingip(router_ip_stub)
        self.upload_neutron_security_groups(deploy_info['security_groups'])
        self.upload_sec_group_rules(deploy_info['security_groups'])
        if self.config.migrate.keep_lbaas:
            self.upload_lb_pools(deploy_info['lb_pools'],
                                 deploy_info['subnets'])
            self.upload_lb_monitors(deploy_info['lb_monitors'])
            self.associate_lb_monitors(deploy_info['lb_pools'],
                                       deploy_info['lb_monitors'])
            self.upload_lb_members(deploy_info['lb_members'],
                                   deploy_info['lb_pools'])
            self.upload_lb_vips(deploy_info['lb_vips'],
                                deploy_info['lb_pools'],
                                deploy_info['subnets'])
        return deploy_info
def extract_router_ips_as_floating_ips(self, subnets, routers_info):
result = []
tenant = self.config.migrate.router_ips_stub_tenant
for router_info in routers_info:
router = Router(router_info, subnets)
tenant_name = tenant if tenant else router.tenant_name
if router.ext_net_id:
result.append({'tenant_name': tenant_name,
'floating_network_id': router.ext_net_id,
'floating_ip_address': router.ext_ip})
return result
def get_mac_by_ip(self, ip_address, instance_id):
for port in self.get_ports_list(device_id=instance_id):
for fixed_ip_info in port['fixed_ips']:
if fixed_ip_info['ip_address'] == ip_address:
return port["mac_address"]
def get_instance_network_info(self, instance_id):
ports = []
for port in self.get_ports_list(device_id=instance_id):
ports.append({
'ip_addresses': [x['ip_address'] for x in port['fixed_ips']],
'mac_address': port['mac_address'],
'floatingip': self.get_port_floating_ip(port['id']),
'allowed_address_pairs': port.get('allowed_address_pairs', []),
})
return ports
def get_port_floating_ip(self, port_id):
floating_ips = self.neutron_client.list_floatingips(
port_id=port_id)['floatingips']
if floating_ips:
LOG.debug('Got %d floating IP for port %s',
len(floating_ips), port_id)
return floating_ips[0]['floating_ip_address']
else:
return None
def get_ports_list(self, **kwargs):
return self.neutron_client.list_ports(**kwargs)['ports']
    def create_port(self, net_id, mac_address, ip_addresses, tenant_id,
                    keep_ip, sg_ids=None, allowed_address_pairs=None):
        """Create a port on network *net_id* owned by *tenant_id*.

        :param mac_address: reuse this MAC on the new port when truthy,
        :param ip_addresses: fixed IPs assigned when *keep_ip* is set,
        :param keep_ip: keep the original fixed IPs on the new port,
        :param sg_ids: optional list of security group IDs,
        :param allowed_address_pairs: optional allowed-address-pairs list.

        The admin user is temporarily added to the (non-admin) target
        tenant so the port can be created on that tenant's behalf.
        """
        param_create_port = {'network_id': net_id,
                             'tenant_id': tenant_id}
        if mac_address:
            param_create_port['mac_address'] = mac_address
        if sg_ids:
            param_create_port['security_groups'] = sg_ids
        if keep_ip:
            param_create_port['fixed_ips'] = [{"ip_address": ip}
                                              for ip in ip_addresses]
        if allowed_address_pairs is not None:
            param_create_port['allowed_address_pairs'] = allowed_address_pairs
        with ksresource.AddAdminUserToNonAdminTenant(
                self.identity_client.keystone_client,
                self.config.cloud.user,
                self.config.cloud.tenant):
            LOG.debug("Creating port IP '%s', MAC '%s' on net '%s'",
                      param_create_port.get('fixed_ips'), mac_address, net_id)
            return self.neutron_client.create_port(
                {'port': param_create_port})['port']
def delete_port(self, port_id):
return self.neutron_client.delete_port(port_id)
    def get_network(self, network_info, tenant_id, keep_ip=False):
        """Find a destination network suitable for *network_info*.

        Search order:
        1. with *keep_ip* — the first tenant or shared network whose
           subnets jointly cover every IP in
           ``network_info['ip_addresses']``;
        2. by network ID;
        3. by network name.

        :raises exception.AbortMigrationError: when nothing matches.
        """
        if keep_ip:
            addresses = [ipaddr.IPAddress(ip)
                         for ip in network_info['ip_addresses']]
            private = self.neutron_client.list_networks(
                tenant_id=tenant_id)['networks']
            shared = self.neutron_client.list_networks(shared=True)['networks']
            for net in private + shared:
                subnets = self.neutron_client.list_subnets(
                    network_id=net['id'])['subnets']
                # every address must fall inside at least one subnet CIDR
                if all(any(ipaddr.IPNetwork(subnet['cidr']).Contains(ip)
                           for subnet in subnets)
                       for ip in addresses):
                    return net
        if 'id' in network_info:
            networks = self.neutron_client.list_networks(
                id=network_info['id'])['networks']
            if len(networks) > 0:
                return networks[0]
        if 'name' in network_info:
            networks = self.neutron_client.list_networks(
                name=network_info['name'])['networks']
            if len(networks) > 0:
                return networks[0]
        LOG.error('Failed to find network %s in tenant %s; keep_ip = %s',
                  repr(network_info), tenant_id, keep_ip)
        raise exception.AbortMigrationError("Can't find suitable network")
def check_existing_port(self, network_id, mac=None, ip_address=None,
ip_addresses=None, existing_ports=None):
if ip_addresses is None:
ip_addresses = []
if ip_address is not None and ip_address not in ip_addresses:
ip_addresses.append(ip_address)
if existing_ports is None:
existing_ports = self.get_ports_list(
fields=['network_id', 'mac_address', 'id', 'fixed_ips',
'device_owner'],
network_id=network_id)
for port in existing_ports:
if port['network_id'] != network_id:
continue
if port['mac_address'] == mac:
return port
for fixed_ip in port['fixed_ips']:
if fixed_ip['ip_address'] in ip_addresses:
return port
return None
@staticmethod
def convert(neutron_object, cloud, obj_name):
"""Convert OpenStack Neutron network object to CloudFerry object.
:param neutron_object: Direct OS NeutronNetwork object to convert,
:cloud: Cloud object,
:obj_name: Name of NeutronNetwork object to convert.
List of possible values:
'network', 'subnet', 'router', 'floating_ip',
'security_group', 'rule'.
"""
obj_map = {
'network': NeutronNetwork.convert_networks,
'subnet': NeutronNetwork.convert_subnets,
'router': NeutronNetwork.convert_routers,
'floating_ip': NeutronNetwork.convert_floatingips,
'security_group': NeutronNetwork.convert_security_groups,
'rule': NeutronNetwork.convert_rules,
'lb_pool': NeutronNetwork.convert_lb_pools,
'lb_member': NeutronNetwork.convert_lb_members,
'lb_monitor': NeutronNetwork.convert_lb_monitors,
'lb_vip': NeutronNetwork.convert_lb_vips
}
return obj_map[obj_name](neutron_object, cloud)
def convert_networks(self, net, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func()
subnets = []
subnets_hash = set()
for subnet in net['subnets']:
snet = self.convert_subnets(subnet, cloud)
subnets.append(snet)
subnets_hash.add(snet['res_hash'])
result = {
'name': net['name'],
'id': net['id'],
'admin_state_up': net['admin_state_up'],
'shared': net['shared'],
'tenant_id': net['tenant_id'],
'tenant_name': get_tenant_name(net['tenant_id']),
'subnets': subnets,
'router:external': net['router:external'],
'provider:physical_network': net['provider:physical_network'],
'provider:network_type': net['provider:network_type'],
'provider:segmentation_id': net['provider:segmentation_id'],
'subnets_hash': subnets_hash,
'meta': {},
}
res_hash = net_res.get_resource_hash(result,
'name',
'shared',
'tenant_name',
'router:external',
'admin_state_up',
'provider:physical_network',
'provider:network_type')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_subnets(snet, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
network_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func()
networks_list = network_res.get_networks_list()
net = get_network_from_list_by_id(snet['network_id'], networks_list)
cidr = str(netaddr.IPNetwork(snet['cidr']).cidr)
result = {
'name': snet['name'],
'id': snet['id'],
'enable_dhcp': snet['enable_dhcp'],
'allocation_pools': snet['allocation_pools'],
'gateway_ip': snet['gateway_ip'],
'ip_version': snet['ip_version'],
'cidr': cidr,
'network_name': net['name'],
'external': net['router:external'],
'network_id': snet['network_id'],
'tenant_name': get_tenant_name(snet['tenant_id']),
'dns_nameservers': snet['dns_nameservers'],
'meta': {},
}
res_hash = network_res.get_resource_hash(result,
'name',
'enable_dhcp',
'ip_version',
'gateway_ip',
'cidr',
'allocation_pools',
'tenant_name',
'network_name')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_routers(router, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func()
result = {
'name': router['name'],
'id': router['id'],
'admin_state_up': router['admin_state_up'],
'external_gateway_info': router['external_gateway_info'],
'tenant_name': get_tenant_name(router['tenant_id']),
'meta': {},
}
result.update(net_res.get_ports_info(router))
if router['external_gateway_info']:
networks_list = net_res.get_networks_list()
ext_id = router['external_gateway_info']['network_id']
ext_net = get_network_from_list_by_id(ext_id, networks_list)
result['ext_net_name'] = ext_net['name']
result['ext_net_tenant_name'] = get_tenant_name(
ext_net['tenant_id'])
result['ext_net_id'] = router['external_gateway_info'][
'network_id']
res_hash = net_res.get_resource_hash(result,
'name',
'tenant_name')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_floatingips(floating, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func()
networks_list = net_res.get_networks_list()
ext_id = floating['floating_network_id']
extnet = get_network_from_list_by_id(ext_id, networks_list)
result = {
'id': floating['id'],
'tenant_id': floating['tenant_id'],
'floating_network_id': ext_id,
'network_name': extnet['name'],
'ext_net_tenant_name': get_tenant_name(extnet['tenant_id']),
'tenant_name': get_tenant_name(floating['tenant_id']),
'fixed_ip_address': floating['fixed_ip_address'],
'floating_ip_address': floating['floating_ip_address'],
'port_id': floating['port_id'],
'meta': {},
}
return result
@staticmethod
def convert_rules(rule, cloud):
net_res = cloud.resources[utl.NETWORK_RESOURCE]
rule_hash = net_res.get_resource_hash(rule,
'direction',
'remote_ip_prefix',
'protocol',
'port_range_min',
'port_range_max',
'ethertype')
result = {
'remote_group_id': rule['remote_group_id'],
'direction': rule['direction'],
'remote_ip_prefix': rule['remote_ip_prefix'],
'protocol': rule['protocol'],
'port_range_min': rule['port_range_min'],
'port_range_max': rule['port_range_max'],
'ethertype': rule['ethertype'],
'security_group_id': rule['security_group_id'],
'rule_hash': rule_hash,
'meta': dict()
}
return result
@staticmethod
def convert_security_groups(sec_gr, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func(
return_default_tenant=False)
result = {
'name': sec_gr['name'],
'id': sec_gr['id'],
'tenant_id': sec_gr['tenant_id'],
'tenant_name': get_tenant_name(sec_gr['tenant_id']),
'description': sec_gr['description'],
'security_group_rules': [NeutronNetwork.convert(gr, cloud, 'rule')
for gr in sec_gr['security_group_rules']],
'meta': {},
}
res_hash = net_res.get_resource_hash(result,
'name',
'tenant_name',
'description')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_lb_pools(pool, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func(
return_default_tenant=False)
result = {
'name': pool['name'],
'id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'protocol': pool['protocol'],
'subnet_id': pool['subnet_id'],
'provider': pool.get('provider'),
'tenant_id': pool['tenant_id'],
'tenant_name': get_tenant_name(pool['tenant_id']),
'health_monitors': pool['health_monitors'],
'members': pool['members'],
'meta': {}
}
res_hash = net_res.get_resource_hash(result,
'name',
'tenant_name',
'lb_method',
'protocol')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_lb_monitors(monitor, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func(
return_default_tenant=False)
result = {
'id': monitor['id'],
'tenant_id': monitor['tenant_id'],
'tenant_name': get_tenant_name(monitor['tenant_id']),
'type': monitor['type'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'url_path': monitor.get('url_path', None),
'expected_codes': monitor.get('expected_codes', None),
'pools': monitor.get('pools'),
'meta': {}
}
res_hash = net_res.get_resource_hash(result,
'tenant_name',
'type',
'delay',
'timeout',
'max_retries')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_lb_members(member, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func(
return_default_tenant=False)
result = {
'id': member['id'],
'pool_id': member['pool_id'],
'address': member['address'],
'protocol_port': member['protocol_port'],
'weight': member['weight'],
'tenant_id': member['tenant_id'],
'tenant_name': get_tenant_name(member['tenant_id']),
'meta': {}
}
res_hash = net_res.get_resource_hash(result,
'address',
'protocol_port',
'weight',
'tenant_name')
result['res_hash'] = res_hash
return result
@staticmethod
def convert_lb_vips(vip, cloud):
identity_res = cloud.resources[utl.IDENTITY_RESOURCE]
net_res = cloud.resources[utl.NETWORK_RESOURCE]
get_tenant_name = identity_res.get_tenants_func(
return_default_tenant=False)
result = {
'name': vip['name'],
'id': vip['id'],
'description': vip['description'],
'address': vip['address'],
'protocol': vip['protocol'],
'protocol_port': vip['protocol_port'],
'pool_id': vip['pool_id'],
'connection_limit': vip['connection_limit'],
'session_persistence': vip.get('session_persistence', None),
'tenant_id': vip['tenant_id'],
'subnet_id': vip['subnet_id'],
'tenant_name': get_tenant_name(vip['tenant_id']),
'meta': {}
}
res_hash = net_res.get_resource_hash(result,
'name',
'address',
'protocol',
'protocol_port',
'tenant_name')
result['res_hash'] = res_hash
return result
def get_shared_networks_raw(self):
"""Returns list of external and shared networks in raw neutron object
format"""
external = self.get_networks_raw({'router:external': True})
shared = self.get_networks_raw({'shared': True})
return external + shared
def get_networks_raw(self, search_dict):
"""Groups networks with subnets in raw `NeutronClient` format"""
neutron = self.neutron_client
nets = neutron.list_networks(**search_dict)['networks']
subnets_list = self.get_subnets_list()
for net in nets:
subnets = []
for subnet_id in net['subnets']:
subnets.append(get_subnet_from_list_by_id(subnet_id,
subnets_list))
net['subnets'] = subnets
return nets
def get_networks(self, tenant_id=''):
LOG.info("Get networks...")
networks = self.get_networks_raw({'tenant_id': tenant_id})
networks_info = []
for net in networks:
cf_net = self.convert_networks(net, self.cloud)
LOG.debug("Getting info about network '%s' (%s):\n%s",
cf_net['name'], cf_net['id'], pprint.pformat(cf_net))
networks_info.append(cf_net)
LOG.info("Done.")
return networks_info
def get_networks_list(self, tenant_id=''):
return self.neutron_client.list_networks(
tenant_id=tenant_id)['networks']
def get_subnets_list(self, tenant_id=''):
return self.neutron_client.list_subnets(tenant_id=tenant_id)['subnets']
def get_detached_ports(self, tenant_id=''):
ports = self.neutron_client.list_ports(tenant_id=tenant_id)['ports']
return [p for p in ports if not p['device_owner']]
def get_subnets(self, tenant_id=''):
LOG.info("Get subnets...")
subnets = self.get_subnets_list(tenant_id)
subnets_info = []
for snet in subnets:
subnet = self.convert(snet, self.cloud, 'subnet')
subnets_info.append(subnet)
LOG.info("Done")
return subnets_info
def reset_subnet_dhcp(self, subnet_id, dhcp_flag):
LOG.debug('Setting enable_dhcp to %s for subnet %s',
dhcp_flag, subnet_id)
subnet_info = {
'subnet':
{
'enable_dhcp': dhcp_flag
}
}
return self.neutron_client.update_subnet(subnet_id, subnet_info)
def get_ports_info(self, router):
LOG.debug("Finding all ports connected to router '%s'", router['name'])
ports_list = self.get_ports_list()
ports = get_ports_by_device_id_from_list(router['id'], ports_list)
subnet_ids = []
ips = []
for port in ports:
for ip_info in port['fixed_ips']:
ips.append(ip_info['ip_address'])
subnet_ids.append(ip_info['subnet_id'])
return {'ips': set(ips), 'subnet_ids': set(subnet_ids)}
def get_routers_raw(self):
routers = self.neutron_client.list_routers()['routers']
if self.filter_tenant_id:
subnet_ids = {
sn['id']
for sn in self.get_subnets_list(self.filter_tenant_id)}
return [r for r in routers
if (r['tenant_id'] == self.filter_tenant_id or
subnet_ids & self.get_ports_info(r)['subnet_ids'])]
return routers
def get_routers(self):
LOG.info("Get routers")
return [self.convert_routers(r, self.cloud)
for r in self.get_routers_raw()]
def get_floatingips(self, tenant_id=''):
LOG.info("Get floatingips...")
floatings = self.neutron_client.list_floatingips(
tenant_id=tenant_id)['floatingips']
floatingips_info = []
for floating in floatings:
floatingip_info = self.convert(floating, self.cloud, 'floating_ip')
floatingips_info.append(floatingip_info)
LOG.info("Done")
return floatingips_info
def get_security_groups(self, tenant_id=''):
return self.neutron_client.list_security_groups(
tenant_id=tenant_id)['security_groups']
    def get_sec_gr_and_rules(self, tenant_id=''):
        """Return converted security groups (rules included).

        Groups belonging to the service tenant are excluded, and groups
        whose tenant name cannot be resolved are skipped with a warning.
        """
        LOG.info("Getting security groups and rules...")
        service_tenant_name = self.config.cloud.service_tenant
        service_tenant_id = \
            self.identity_client.get_tenant_id_by_name(service_tenant_name)
        sec_grs = self.get_security_groups(tenant_id)
        sec_groups_info = []
        for sec_gr in sec_grs:
            if sec_gr['tenant_id'] != service_tenant_id:
                sec_gr_info = self.convert(sec_gr, self.cloud,
                                           'security_group')
                if not sec_gr_info['tenant_name']:
                    # Skip security group from undefined tenant
                    LOG.warning("Security group '%s' (%s) from tenant %s "
                                "has been skipped.", sec_gr['name'],
                                sec_gr['id'], sec_gr['tenant_id'])
                    continue
                sec_groups_info.append(sec_gr_info)
        LOG.info("Done")
        return sec_groups_info
def get_lb_pools(self, tenant_id=''):
LOG.info("Getting load balancer pools...")
pools = self.neutron_client.list_pools(tenant_id=tenant_id)['pools']
pools_info = []
for pool in pools:
pool_info = self.convert(pool, self.cloud, 'lb_pool')
pools_info.append(pool_info)
LOG.info("Done")
return pools_info
def get_lb_monitors(self, tenant_id=''):
LOG.info("Getting load balancer monitors...")
monitors = \
self.neutron_client.list_health_monitors(
tenant_id=tenant_id)['health_monitors']
monitors_info = []
for mon in monitors:
mon_info = self.convert(mon, self.cloud, 'lb_monitor')
monitors_info.append(mon_info)
LOG.info("Done")
return monitors_info
def get_lb_members(self, tenant_id=''):
LOG.info("Getting load balancer members...")
members = self.neutron_client.list_members(
tenant_id=tenant_id)['members']
members_info = []
for member in members:
member_info = self.convert(member, self.cloud, 'lb_member')
members_info.append(member_info)
LOG.info("Done")
return members_info
def get_lb_vips(self, tenant_id=''):
LOG.info("Getting load balancer VIPs...")
vips = self.neutron_client.list_vips(
tenant_id=tenant_id)['vips']
vips_info = []
for vip in vips:
vip_info = self.convert(vip, self.cloud, 'lb_vip')
vips_info.append(vip_info)
LOG.info("Done")
return vips_info
    def upload_lb_vips(self, vips, pools, subnets):
        """Create SRC LBaaS VIPs on DST, skipping duplicates by res_hash.

        *pools* and *subnets* are the converted SRC resources; the DST
        pool/subnet for each VIP is located by matching resource hashes.
        Created DST IDs are recorded in each VIP's ``meta['id']``.
        """
        LOG.info("Creating load balancer VIPs on destination")
        existing_vips = self.get_lb_vips()
        existing_vips_hashlist = [ex_vip['res_hash']
                                  for ex_vip in existing_vips]
        existing_pools = self.get_lb_pools()
        existing_snets = self.get_subnets()
        for vip in vips:
            if not vip['tenant_name']:
                # tenant could not be resolved on SRC -- skip
                continue
            if vip['res_hash'] not in existing_vips_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    vip['tenant_name'])
                pool_hash = self.get_res_hash_by_id(pools, vip['pool_id'])
                dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
                snet_hash = self.get_res_hash_by_id(subnets, vip['subnet_id'])
                dst_subnet = self.get_res_by_hash(existing_snets, snet_hash)
                vip_info = {
                    'vip': {
                        'name': vip['name'],
                        'description': vip['description'],
                        'address': vip['address'],
                        'protocol': vip['protocol'],
                        'protocol_port': vip['protocol_port'],
                        'connection_limit': vip['connection_limit'],
                        'pool_id': dst_pool['id'],
                        'tenant_id': tenant_id,
                        'subnet_id': dst_subnet['id']
                    }
                }
                if vip['session_persistence']:
                    vip_info['vip']['session_persistence'] = \
                        vip['session_persistence']
                vip['meta']['id'] = self.neutron_client.create_vip(
                    vip_info)['vip']['id']
            else:
                LOG.info("| Dst cloud already has the same VIP "
                         "with address %s in tenant %s",
                         vip['address'], vip['tenant_name'])
        LOG.info("Done")
    def upload_lb_members(self, members, pools):
        """Create SRC LBaaS members on DST, skipping duplicates by hash.

        The DST pool for each member is located by matching resource
        hashes. Created DST IDs are stored in ``member['meta']['id']``.
        """
        LOG.info("Creating load balancer members...")
        existing_members = self.get_lb_members()
        existing_members_hashlist = \
            [ex_member['res_hash'] for ex_member in existing_members]
        existing_pools = self.get_lb_pools()
        for member in members:
            if not member['tenant_name']:
                # tenant could not be resolved on SRC -- skip
                continue
            if member['res_hash'] not in existing_members_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    member['tenant_name'])
                pool_hash = self.get_res_hash_by_id(pools, member['pool_id'])
                dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
                member_info = {
                    'member': {
                        'protocol_port': member["protocol_port"],
                        'address': member['address'],
                        'pool_id': dst_pool['id'],
                        'tenant_id': tenant_id
                    }
                }
                member['meta']['id'] = self.neutron_client.create_member(
                    member_info)['member']['id']
            else:
                LOG.info("| Dst cloud already has the same member "
                         "with address %s in tenant %s",
                         member['address'], member['tenant_name'])
        LOG.info("Done")
    def upload_lb_monitors(self, monitors):
        """Create SRC LBaaS health monitors on DST, de-duplicated by hash.

        Created DST IDs are stored in ``mon['meta']['id']``.
        """
        LOG.info("Creating load balancer monitors on destination...")
        existing_mons = self.get_lb_monitors()
        existing_mons_hashlist = \
            [ex_mon['res_hash'] for ex_mon in existing_mons]
        for mon in monitors:
            if not mon['tenant_name']:
                # tenant could not be resolved on SRC -- skip
                continue
            if mon['res_hash'] not in existing_mons_hashlist:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    mon['tenant_name'])
                mon_info = {
                    'health_monitor':
                        {
                            'tenant_id': tenant_id,
                            'type': mon['type'],
                            'delay': mon['delay'],
                            'timeout': mon['timeout'],
                            'max_retries': mon['max_retries']
                        }
                }
                # HTTP-style monitors carry url_path/expected_codes too
                if mon['url_path']:
                    mon_info['health_monitor']['url_path'] = mon['url_path']
                    mon_info['health_monitor']['expected_codes'] = \
                        mon['expected_codes']
                mon['meta']['id'] = self.neutron_client.create_health_monitor(
                    mon_info)['health_monitor']['id']
            else:
                LOG.info("| Dst cloud already has the same healthmonitor "
                         "with type %s in tenant %s",
                         mon['type'], mon['tenant_name'])
        LOG.info("Done")
    def associate_lb_monitors(self, pools, monitors):
        """Re-create SRC pool<->monitor associations on DST.

        DST pools and monitors are located by matching resource hashes;
        associations that already exist on DST are left alone.
        """
        LOG.info("Associating balancer monitors on destination...")
        existing_pools = self.get_lb_pools()
        existing_monitors = self.get_lb_monitors()
        for pool in pools:
            if not pool['tenant_name']:
                # tenant could not be resolved on SRC -- skip
                continue
            pool_hash = self.get_res_hash_by_id(pools, pool['id'])
            dst_pool = self.get_res_by_hash(existing_pools, pool_hash)
            for monitor_id in pool['health_monitors']:
                monitor_hash = self.get_res_hash_by_id(monitors, monitor_id)
                dst_monitor = self.get_res_by_hash(existing_monitors,
                                                   monitor_hash)
                if dst_monitor['id'] not in dst_pool['health_monitors']:
                    dst_monitor_info = {
                        'health_monitor': {
                            'id': dst_monitor['id']
                        }
                    }
                    self.neutron_client.associate_health_monitor(
                        dst_pool['id'], dst_monitor_info)
                else:
                    LOG.info(
                        "Dst pool with name %s already has associated the "
                        "healthmonitor with id %s in tenant %s",
                        dst_pool['name'], dst_monitor['id'],
                        dst_monitor['tenant_name'])
        LOG.info("Done")
    def upload_lb_pools(self, pools, subnets):
        """Create SRC LBaaS pools on DST, skipping duplicates by hash.

        The DST subnet for each pool is located by matching resource
        hashes. Created DST IDs are stored in ``pool['meta']['id']``.
        """
        LOG.info("Creating load balancer pools on destination...")
        existing_pools = self.get_lb_pools()
        existing_pools_hashlist = \
            [ex_pool['res_hash'] for ex_pool in existing_pools]
        existing_subnets = self.get_subnets()
        for pool in pools:
            if pool['res_hash'] not in existing_pools_hashlist and \
                    pool['tenant_name']:
                tenant_id = self.identity_client.get_tenant_id_by_name(
                    pool['tenant_name'])
                snet_hash = self.get_res_hash_by_id(subnets, pool['subnet_id'])
                snet_id = self.get_res_by_hash(existing_subnets,
                                               snet_hash)['id']
                pool_info = {
                    'pool': {
                        'name': pool['name'],
                        'description': pool['description'],
                        'tenant_id': tenant_id,
                        'subnet_id': snet_id,
                        'protocol': pool['protocol'],
                        'lb_method': pool['lb_method']
                    }
                }
                if pool.get('provider'):
                    pool_info['pool']['provider'] = pool.get('provider')
                LOG.debug("Creating LB pool '%s'", pool['name'])
                pool['meta']['id'] = \
                    self.neutron_client.create_pool(pool_info)['pool']['id']
            else:
                LOG.info("| Dst cloud already has the same pool "
                         "with name %s in tenant %s",
                         pool['name'], pool['tenant_name'])
        LOG.info("Done")
    def upload_neutron_security_groups(self, sec_groups):
        """Create SRC security groups on DST (rules are uploaded
        separately by upload_sec_group_rules).

        The default group and groups already present (matched by
        res_hash) are skipped. Created DST IDs are stored in
        ``sec_group['meta']['id']``.
        """
        LOG.info("Creating neutron security groups on destination...")
        exist_secgrs = self.get_sec_gr_and_rules()
        exis_secgrs_hashlist = [ex_sg['res_hash'] for ex_sg in exist_secgrs]
        for sec_group in sec_groups:
            if sec_group['name'] != DEFAULT_SECGR:
                if sec_group['res_hash'] not in exis_secgrs_hashlist:
                    tenant_id = \
                        self.identity_client.get_tenant_id_by_name(
                            sec_group['tenant_name']
                        )
                    sg_info = \
                        {
                            'security_group':
                            {
                                'name': sec_group['name'],
                                'tenant_id': tenant_id,
                                'description': sec_group['description']
                            }
                        }
                    sec_group['meta']['id'] = self.neutron_client.\
                        create_security_group(sg_info)['security_group']['id']
        LOG.info("Done")
    def upload_sec_group_rules(self, sec_groups):
        """Create SRC security group rules on the matching DST groups.

        Each SRC group is matched to its DST counterpart by res_hash;
        rules already present on DST (by rule_hash) are skipped, and
        remote-group references are remapped to DST group IDs.
        """
        LOG.info("Creating neutron security group rules on destination...")
        ex_secgrs = self.get_sec_gr_and_rules()
        for sec_gr in sec_groups:
            ex_secgr = \
                self.get_res_by_hash(ex_secgrs, sec_gr['res_hash'])
            if ex_secgr:
                exrules_hlist = \
                    [r['rule_hash'] for r in ex_secgr['security_group_rules']]
            else:
                exrules_hlist = []
            for rule in sec_gr['security_group_rules']:
                if rule['protocol'] \
                        and (rule['rule_hash'] not in exrules_hlist):
                    # NOTE(review): if ex_secgr is None (group missing on
                    # DST) the lookups below would fail -- confirm groups
                    # are always uploaded before their rules.
                    rinfo = \
                        {'security_group_rule': {
                            'direction': rule['direction'],
                            'protocol': rule['protocol'],
                            'port_range_min': rule['port_range_min'],
                            'port_range_max': rule['port_range_max'],
                            'ethertype': rule['ethertype'],
                            'remote_ip_prefix': rule['remote_ip_prefix'],
                            'security_group_id': ex_secgr['id'],
                            'tenant_id': ex_secgr['tenant_id']}}
                    if rule['remote_group_id']:
                        remote_sghash = \
                            self.get_res_hash_by_id(sec_groups,
                                                    rule['remote_group_id'])
                        rem_ex_sec_gr = \
                            self.get_res_by_hash(ex_secgrs,
                                                 remote_sghash)
                        rinfo['security_group_rule']['remote_group_id'] = \
                            rem_ex_sec_gr['id']
                    LOG.debug("Creating security group %s", rinfo)
                    new_rule = \
                        self.neutron_client.create_security_group_rule(rinfo)
                    rule['meta']['id'] = new_rule['security_group_rule']['id']
        LOG.info("Done")
    def upload_networks(self, networks, src_seg_ids, detached_ports):
        """Create SRC networks (with subnets and detached ports) on DST.

        :param networks: converted SRC networks,
        :param src_seg_ids: segmentation IDs in use on SRC per network
            type; used to pick a DST segmentation ID that conflicts with
            neither cloud,
        :param detached_ports: device-less ports to re-create on the
            corresponding DST networks.
        """
        LOG.info("Creating networks on destination")
        identity = self.identity_client
        existing_networks = self.get_networks()
        # we need to handle duplicates in segmentation ids
        dst_seg_ids = get_segmentation_ids_from_net_list(existing_networks)
        for src_net in networks:
            network_detached_ports = [p for p in detached_ports
                                      if p['network_id'] == src_net['id']]
            # Check network for existence on destination cloud
            dst_net = self.get_dst_net_by_src_net(existing_networks, src_net)
            if dst_net:
                LOG.info("DST cloud already has the same "
                         "network with name '%s' in tenant '%s'",
                         src_net['name'], src_net['tenant_name'])
                self.deploy_detached_ports(dst_net, network_detached_ports)
                continue
            LOG.debug("Trying to create network '%s'", src_net['name'])
            tenant_id = identity.get_tenant_id_by_name(src_net['tenant_name'])
            if tenant_id is None:
                LOG.warning("Tenant '%s' is not available on destination! "
                            "Make sure you migrated identity (keystone) "
                            "resources! Skipping network '%s'.",
                            src_net['tenant_name'], src_net['name'])
                continue
            no_extnet_migration = (
                src_net.get('router:external') and
                not self.config.migrate.migrate_extnets or
                (src_net['id'] in self.ext_net_map))
            if no_extnet_migration:
                LOG.debug("External networks migration is disabled in the "
                          "config OR external networks mapping is enabled. "
                          "Skipping external network: '%s (%s)'",
                          src_net['name'], src_net['id'])
                continue
            # create dict, representing basic info about network
            network_info = {
                'network': {
                    'tenant_id': tenant_id,
                    'admin_state_up': src_net["admin_state_up"],
                    'shared': src_net["shared"],
                    'name': src_net['name'],
                    'router:external': src_net['router:external']
                }
            }
            phys_net = src_net["provider:physical_network"]
            network_type = src_net['provider:network_type']
            seg_id = src_net["provider:segmentation_id"]
            if phys_net or (src_net['provider:network_type'] in
                            ['gre', 'vxlan']):
                # Update network info with additional arguments.
                # We need to check if we have parameter
                # "provider:physical_network" or param
                # "provider:network_type" either is 'gre' or 'vxlan'.
                # If condition is satisfied, we need to specify 2 more params:
                # "provider:network_type" and "provider:segmentation_id".
                list_update_atr = ["provider:network_type"]
                if phys_net:
                    list_update_atr.append("provider:physical_network")
                for atr in list_update_atr:
                    network_info['network'].update({atr: src_net.get(atr)})
                # Check segmentation ID for overlapping
                # If it doesn't overlap with DST, save the same segmentation ID
                # Otherwise pick free segmentation ID, which does not overlap
                # with ANY segmentation ID on SRC
                if seg_id is not None:
                    # Segmentation ID exists; Check for overlapping
                    seg_id_overlaps = (network_type in dst_seg_ids and
                                       seg_id in dst_seg_ids[network_type])
                    if seg_id_overlaps:
                        # Choose the lowest free segmentation ID, that also
                        # does not overlap with SRC
                        new_seg_id = generate_new_segmentation_id(src_seg_ids,
                                                                  dst_seg_ids,
                                                                  network_type)
                        LOG.debug("'%s' segmentation ID '%s' overlaps with "
                                  "DST. Generating new one: '%s'.",
                                  network_type, seg_id, new_seg_id)
                        # Use it for network
                        network_info['network']['provider:segmentation_id'] = (
                            new_seg_id)
                        # Update DST segmentation IDs with the just created one
                        dst_seg_ids[network_type].append(new_seg_id)
                    else:
                        # Otherwise use original segmentation ID from SRC
                        network_info['network']['provider:segmentation_id'] = (
                            seg_id)
            created_network = self.create_network(src_net, network_info)
            self.deploy_detached_ports(created_network, network_detached_ports)
def deploy_detached_ports(self, net, ports):
    """Recreate SRC "detached" ports (ports not owned by instances) on the
    DST network `net`.

    DHCP is switched off on every subnet while the ports are recreated
    (presumably so the DHCP agent cannot allocate the IPs being migrated
    -- TODO confirm), then re-enabled only where it was originally on.

    :param net: created DST network dict; must contain 'id', 'tenant_id'
        and 'subnets'
    :param ports: list of SRC port dicts to recreate on DST
    :raises exception.AbortMigrationError: when a conflicting DST port is
        owned by something other than a neutron service and therefore
        cannot safely be deleted
    """
    for subnet in net['subnets']:
        self.reset_subnet_dhcp(subnet['id'], False)
    # Index current DST ports by ID so conflicting ones can be removed
    # from the working set as they are deleted below.
    existing_ports = {p['id']: p
                      for p in self.get_ports_list(network_id=net['id'])}
    for port in ports:
        ip_addresses = [fip['ip_address'] for fip in port['fixed_ips']]
        existing_port = self.check_existing_port(
            net['id'], port['mac_address'],
            ip_addresses=ip_addresses,
            existing_ports=existing_ports.values())
        if existing_port is not None:
            if existing_port['mac_address'] == port['mac_address']:
                # Same MAC on the same network: already migrated earlier.
                LOG.debug('Port %s already migrated to %s',
                          port['id'], existing_port['id'])
                continue
            # Ports owned by neutron services ('network:*') or with no
            # owner at all are safe to delete and recreate.
            if existing_port['device_owner'].startswith('network:') or \
                    not existing_port['device_owner']:
                LOG.debug('Deleting port %s from DST', repr(existing_port))
                self.delete_port(existing_port['id'])
                del existing_ports[existing_port['id']]
            else:
                raise exception.AbortMigrationError(
                    'Can\'t migrate port %s conflict with port %s' %
                    (port['id'], existing_port['id']))
        self.create_port(net['id'], port['mac_address'], ip_addresses,
                         net['tenant_id'], True)
    # Restore DHCP only on subnets that originally had it enabled.
    for subnet in net['subnets']:
        if subnet['enable_dhcp']:
            self.reset_subnet_dhcp(subnet['id'], True)
def create_network(self, src_net, network_info):
    """Create a network and its subnets on the destination cloud.

    :param src_net: SRC network dict; each subnet's ``meta['id']`` is
        updated in place with the ID of the subnet created on DST
    :param network_info: request body passed to neutron create_network
    :return: the created DST network dict with the created subnet dicts
        appended to its 'subnets' list, or None when network creation
        failed (best-effort: the migration continues with a warning)
    """
    try:
        LOG.debug("creating network with args: '%s'",
                  pprint.pformat(network_info))
        created_net = self.neutron_client.create_network(network_info)
        created_net = created_net['network']
        LOG.info("Created net '%s'", created_net['name'])
    except neutron_exc.NeutronClientException as e:
        # Deliberate best-effort: an already-existing network is only
        # warned about, not treated as fatal.
        LOG.warning("Cannot create network on destination: %s. "
                    "Destination cloud already has the same network. May "
                    "result in port allocation errors, such as VM IP "
                    "allocation, floating IP allocation, router IP "
                    "allocation, etc.", e)
        return
    for snet in src_net['subnets']:
        subnet_info = {
            'subnet': {
                'name': snet['name'],
                'enable_dhcp': snet['enable_dhcp'],
                'network_id': created_net['id'],
                'cidr': snet['cidr'],
                'allocation_pools': snet['allocation_pools'],
                'gateway_ip': snet['gateway_ip'],
                'ip_version': snet['ip_version'],
                'dns_nameservers': snet['dns_nameservers'],
                'tenant_id': created_net['tenant_id']
            }
        }
        try:
            created_subnet = self.neutron_client.create_subnet(subnet_info)
            created_subnet = created_subnet['subnet']
            # Record the DST subnet ID back on the SRC description.
            snet['meta']['id'] = created_subnet['id']
            LOG.info("Created subnet '%s' in net '%s'",
                     created_subnet['cidr'], created_net['name'])
            created_net['subnets'].append(created_subnet)
        except neutron_exc.NeutronClientException:
            LOG.info("Subnet '%s' (%s) already exists, skipping",
                     snet['name'], snet['cidr'])
    return created_net
def upload_routers(self, networks, subnets, routers):
    """Create SRC routers on DST (or reuse existing hash-matched ones),
    then attach their interfaces and external gateways.

    :param networks: SRC networks list (used to match gateway networks)
    :param subnets: SRC subnets list
    :param routers: SRC router dicts; ``meta['id']`` of each is set to
        the corresponding DST router ID
    """
    LOG.info("Creating routers on destination")
    existing_subnets = self.get_subnets()
    existing_routers = self.get_routers()
    for router in routers:
        tenant_id = self.identity_client.get_tenant_id_by_name(
            router['tenant_name'])
        r_info = {'router': {'name': router['name'],
                             'tenant_id': tenant_id}}
        # Reuse a DST router with the same resource hash if present.
        existing_router = self.get_res_by_hash(existing_routers,
                                               router['res_hash'])
        if not existing_router:
            LOG.debug("Creating router %s", pprint.pformat(r_info))
            existing_router = self.convert_routers(
                self.neutron_client.create_router(r_info)['router'],
                self.cloud)
        router['meta']['id'] = existing_router['id']
        self.add_router_interfaces(router, existing_router, subnets,
                                   existing_subnets)
        ex_gw_info = router['external_gateway_info']
        if ex_gw_info:
            self.add_router_gateway(existing_router, router['ext_net_id'],
                                    networks,
                                    ex_gw_info.get('enable_snat'))
def add_router_gateway(self, dst_router, ext_net_id, src_nets,
                       set_snat=None):
    """Set the external gateway of a DST router to the DST network that
    matches the SRC external network.

    :param dst_router: DST router dict ('id' and 'name' are used)
    :param ext_net_id: external network ID on the SRC cloud
    :param src_nets: networks list from the SRC cloud
    :param set_snat: possible values:
    1. `None` - do not update, useful in cases when destination cloud does
    not support SNAT for external networks (pre-icehouse);
    2. `True` - enable SNAT
    3. `False` - disable SNAT
    """
    dst_nets = self.get_networks()
    # Map the SRC external network to its DST counterpart.
    dst_net_id = self.get_new_extnet_id(ext_net_id, src_nets, dst_nets)
    if dst_net_id:
        info = {'network_id': dst_net_id}
        if set_snat is not None:
            info['enable_snat'] = set_snat
        LOG.debug("Setting the external network (%s) gateway for a router "
                  "'%s' (%s)", dst_net_id, dst_router['name'],
                  dst_router['id'])
        self.neutron_client.add_gateway_router(dst_router['id'], info)
    else:
        # Fixed broken log message grammar ("is not exists").
        LOG.warning("External network (%s) does not exist on destination",
                    ext_net_id)
def add_router_interfaces(self, src_router, dst_router, src_subnets,
                          dst_subnets):
    """Attach to `dst_router` the internal subnets `src_router` has.

    Subnets are matched SRC->DST via their resource hashes.  Subnets on
    external networks and subnets already attached to the DST router are
    skipped.  Interface attach failures are logged, not raised.
    """
    for subnet_id in src_router['subnet_ids']:
        subnet_hash = self.get_res_hash_by_id(src_subnets, subnet_id)
        src_subnet = self.get_res_by_hash(src_subnets, subnet_hash)
        if src_subnet['external']:
            # External connectivity is handled by the router gateway,
            # not by an interface.
            LOG.debug("NOT connecting subnet '%s' to router '%s' because "
                      "it's connected to external network", subnet_id,
                      dst_router['name'])
            continue
        existing_subnet = self.get_res_by_hash(dst_subnets, subnet_hash)
        if existing_subnet['id'] in dst_router['subnet_ids']:
            # Already attached on DST.
            continue
        LOG.debug("Adding subnet '%s' to router '%s'", subnet_id,
                  dst_router['name'])
        try:
            self.neutron_client.add_interface_router(
                dst_router['id'],
                {"subnet_id": existing_subnet['id']})
        except neutron_exc.NeutronClientException as e:
            LOG.debug(e, exc_info=True)
            LOG.warning("Couldn't add interface to subnet %s to router %s:"
                        "\n%s", existing_subnet['id'], dst_router['id'], e)
def upload_floatingips(self, networks, src_floats):
    """Creates floating IPs on destination

    Process:
    1. Create floating IP on destination using neutron APIs in particular
    tenant. This allocates first IP address available in external
    network.
    2. If keep_floating_ips option is set:
    2.1. Modify IP address of a floating IP to be the same as on
    destination. This is done from the DB level.
    2.2. Else - do not modify floating IP address
    3. Return list of ID of new floating IPs

    :param networks: SRC networks list (used to match external networks)
    :param src_floats: SRC floating IP dicts
    :return: list of DST floating IP IDs (existing and newly created)
    """
    LOG.info("Uploading floating IPs...")
    existing_networks = self.get_networks()
    new_floating_ids = []
    fips_dst = self.neutron_client.list_floatingips()['floatingips']
    # Map of DST floating IP address -> DST floating IP ID.
    ipfloatings = {fip['floating_ip_address']: fip['id']
                   for fip in fips_dst}
    for fip in src_floats:
        ip = fip['floating_ip_address']
        if ip in ipfloatings:
            # Address already present on DST: nothing to create.
            new_floating_ids.append(ipfloatings[ip])
            continue
        # Temporarily add the admin user to the FIP's tenant so the
        # allocation is made on behalf of the right tenant.
        with ksresource.AddAdminUserToNonAdminTenant(
                self.identity_client.keystone_client,
                self.config.cloud.user,
                fip['tenant_name']):
            ext_net_id = self.get_new_extnet_id(
                fip['floating_network_id'], networks, existing_networks)
            if ext_net_id is None:
                LOG.info("No external net for floating IP, make sure all "
                         "external networks migrated. Skipping floating "
                         "IP '%s'", fip['floating_ip_address'])
                continue
            tenant = self.identity_client.keystone_client.tenants.find(
                name=fip['tenant_name'])
            new_fip = {
                'floatingip': {
                    'floating_network_id': ext_net_id,
                    'tenant_id': tenant.id
                }
            }
            created_fip = self.create_floatingip(new_fip)
            if created_fip is None:
                continue
            fip_id = created_fip['id']
            new_floating_ids.append(fip_id)
            # Rewrite the auto-allocated address to the original SRC
            # address directly in the neutron DB, and delete the stale
            # availability range (presumably so neutron recomputes it --
            # TODO confirm against the neutron DB schema in use).
            # NOTE(review): SQL is built with str.format; the values come
            # from neutron itself, not user input.
            sqls = [('UPDATE IGNORE floatingips '
                     'SET floating_ip_address = "{ip}" '
                     'WHERE id = "{fip_id}"').format(ip=ip, fip_id=fip_id),
                    ('UPDATE IGNORE ipallocations '
                     'SET ip_address = "{ip}" '
                     'WHERE port_id = ('
                     'SELECT floating_port_id '
                     'FROM floatingips '
                     'WHERE id = "{fip_id}")').format(
                        ip=ip, fip_id=fip_id),
                    ('DELETE FROM ipavailabilityranges '
                     'WHERE allocation_pool_id in ( '
                     'SELECT id '
                     'FROM ipallocationpools '
                     'WHERE subnet_id = ( '
                     'SELECT subnet_id '
                     'FROM ipallocations '
                     'WHERE port_id = ( '
                     'SELECT floating_port_id '
                     'FROM floatingips '
                     'WHERE id = "{fip_id}")))').format(
                        fip_id=fip_id)]
            LOG.debug(sqls)
            dst_mysql = self.mysql_connector
            dst_mysql.batch_execute(sqls)
    LOG.info("Done")
    return new_floating_ids
def create_floatingip(self, fip):
    """Create a floating IP on DST; return its dict, or None on failure
    (the neutron error is logged as a warning)."""
    LOG.debug("Creating FIP on net '%s'",
              fip['floatingip']['floating_network_id'])
    try:
        response = self.neutron_client.create_floatingip(fip)
    except neutron_exc.NeutronClientException as e:
        LOG.warning("Unable to create floating IP on destination: '%s'", e)
        return None
    return response['floatingip']
def update_floatingip(self, floatingip_id, port_id=None):
    """Update the port association of a floating IP.

    :param floatingip_id: DST floating IP ID
    :param port_id: port to associate (None presumably disassociates --
        semantics defined by the neutron API)
    :return: neutron's update_floatingip response
    """
    LOG.debug("Associating floating IP '%s' with port '%s'",
              floatingip_id, port_id)
    body = {'floatingip': {'port_id': port_id}}
    return self.neutron_client.update_floatingip(floatingip_id, body)
@staticmethod
def get_res_by_hash(existing_resources, resource_hash):
for resource in existing_resources:
if resource['res_hash'] == resource_hash:
return resource
@staticmethod
def get_res_hash_by_id(resources, resource_id):
for resource in resources:
if resource['id'] == resource_id:
return resource['res_hash']
@staticmethod
def get_resource_hash(neutron_resource, *args):
    """Build an order-insensitive, case-insensitive hash of a neutron
    resource over the attribute names listed in *args.

    List-valued attributes are flattened; for 'allocation_pools' the
    {'start': ip, 'end': ip} dicts are flattened to their IP values
    first.

    NOTE: Python 2 only (`basestring`; sorting a mixed-type list).
    """
    net_res = copy.deepcopy(neutron_resource)
    list_info = list()
    for arg in args:
        if not isinstance(net_res[arg], list):
            list_info.append(net_res[arg])
        else:
            if arg == 'allocation_pools':
                pools = net_res[arg]
                # Flatten [{'start': .., 'end': ..}, ...] to a flat list.
                net_res[arg] = [ip for pl in pools for ip in pl.values()]
            for argitem in net_res[arg]:
                if isinstance(argitem, basestring):
                    argitem = argitem.lower()
                list_info.append(argitem)
    # Lower-case remaining strings and sort so that neither case nor
    # attribute order changes the final hash.
    hash_list = \
        [info.lower() if isinstance(info, basestring) else info
         for info in list_info]
    hash_list.sort()
    return hash(tuple(hash_list))
def get_new_extnet_id(self, src_net_id, src_nets, dst_nets):
    """
    Get ID of similar external network form DST.

    :param src_net_id: External network ID from SRC cloud,
    :param src_nets: Networks list from SRC cloud,
    :param dst_nets: Networks list from DST cloud,
    :return unicode: External network ID from DST, that matches with the
    similar network from SRC.
    """
    # An explicit SRC->DST mapping (ext_net_map) takes precedence over
    # hash-based matching.
    if src_net_id in self.ext_net_map:
        return self.ext_net_map[src_net_id]
    src_net = get_network_from_list_by_id(src_net_id, src_nets)
    dst_net = self.get_dst_net_by_src_net(dst_nets, src_net)
    if not dst_net:
        return None
    return dst_net['id']
@staticmethod
def get_dst_net_by_src_net(existing_networks, src_net):
"""
Get the same Network object from DST cloud.
:param existing_networks: Existing networks list on DST cloud,
:param src_net: Network object from SRC,
:return dict: Network object from DST, that matches with the same
network from SRC.
"""
for net in existing_networks:
if (net['res_hash'] == src_net['res_hash'] and
net['subnets_hash'] == src_net['subnets_hash']):
return net
class Router(object):
    """
    Represents router_info, extract external ip.

    Router_info contain list of ips only in different order. Impossible to
    define external router ip.
    """

    def __init__(self, router_info, subnets):
        # :param router_info: dict with 'id', 'tenant_name', 'ips' and
        #     optionally 'ext_net_id' and 'subnet_ids'
        # :param subnets: mapping of subnet_id -> subnet dict
        self.id = router_info['id']
        self.ext_net_id = router_info.get('ext_net_id', None)
        # CIDRs of the internal (non-external) subnets.
        self.int_cidr = []
        self.tenant_name = router_info['tenant_name']
        if self.ext_net_id:
            subnet_ids = router_info['subnet_ids']
            for subnet_id in subnet_ids:
                subnet = subnets[subnet_id]
                if subnet['network_id'] == self.ext_net_id:
                    # Subnet on the external network -> external CIDR.
                    self.ext_cidr = subnet['cidr']
                    self.ext_subnet_id = subnet_id
                else:
                    self.int_cidr.append(subnet['cidr'])
            # NOTE(review): if no attached subnet belongs to ext_net_id,
            # self.ext_cidr is never assigned and the next line raises
            # AttributeError -- confirm callers guarantee the invariant.
            ext_network = ipaddr.IPNetwork(self.ext_cidr)
            # The router IP that falls inside the external CIDR is the
            # external IP.
            for ip in router_info['ips']:
                if ext_network.Contains(ipaddr.IPAddress(ip)):
                    self.ext_ip = ip
                    break
def get_network_from_list_by_id(network_id, networks_list):
    """Get Neutron network by id from provided networks list.

    :param network_id: Neutron network ID
    :param networks_list: List of Neutron networks, where target network
    should be searched
    :return: the matching network dict, or None (with a warning logged)
    """
    match = next((net for net in networks_list if net['id'] == network_id),
                 None)
    if match is None:
        LOG.warning("Cannot obtain network with id='%s' from provided "
                    "networks list", network_id)
    return match
def get_subnet_from_list_by_id(subnet_id, subnets_list):
    """Get Neutron subnet by id from provided subnets list.

    :param subnet_id: Neutron subnet ID
    :param subnets_list: List of Neutron subnets, where target subnet should
    be searched
    :return: the matching subnet dict, or None (with a warning logged)
    """
    match = next((snet for snet in subnets_list if snet['id'] == subnet_id),
                 None)
    if match is None:
        LOG.warning("Cannot obtain subnet with id='%s' from provided "
                    "subnets list", subnet_id)
    return match
def get_ports_by_device_id_from_list(device_id, ports_list):
    """Get Neutron ports by device ID from provided ports list.

    :param device_id: Port device ID
    :param ports_list: List of Neutron ports, where target ports should be
    searched
    :result: List of ports, which are belong to specified device ID
    """
    # Idiom: list comprehension instead of a manual append loop.
    ports = [port for port in ports_list if port['device_id'] == device_id]
    if not ports:
        LOG.debug("There are no ports with device_id='%s' in provided list",
                  device_id)
    return ports
def get_network_from_list(ip, tenant_id, networks_list, subnets_list):
    """Get Neutron network by parameters from provided list.

    :param ip: IP address of VM from this network
    :param tenant_id: Tenant Id of VM in this network
    :param networks_list: List of Neutron networks, where target network
    should be searched
    :param subnets_list: List of Neutron subnets, where target network
    should be searched
    :return: the matching network dict, or None when no subnet matches
    """
    instance_ip = ipaddr.IPAddress(ip)
    for subnet in subnets_list:
        network_id = subnet['network_id']
        net = get_network_from_list_by_id(network_id, networks_list)
        # Robustness fix: the lookup helper returns None (and logs a
        # warning) when the subnet's network is missing from
        # networks_list; skip it instead of crashing on net['shared'].
        if net is None:
            continue
        if subnet['tenant_id'] == tenant_id or net['shared']:
            if ipaddr.IPNetwork(subnet['cidr']).Contains(instance_ip):
                return net
def get_segmentation_ids_from_net_list(networks):
    """Get busy segmentation IDs from provided networks list.

    We need to handle duplicates in segmentation ids.
    Neutron has different validation rules for different network types.

    For 'gre' and 'vxlan' network types there is no strong requirement
    for 'physical_network' attribute, if we want to have
    'segmentation_id', because traffic is encapsulated in L3 packets.

    For 'vlan' network type there is a strong requirement for
    'physical_network' attribute, if we want to have 'segmentation_id'.

    :result: Dictionary with busy segmentation IDs.
    Hash is used with structure {"gre": [1, 2, ...],
    "vlan": [1, 2, ...]}
    """
    used_seg_ids = {}
    for net in networks:
        net_type = net["provider:network_type"]
        # Tunnel nets (gre/vxlan) may carry a segmentation ID with no
        # physical network; other types need the physical network set.
        carries_seg_id = (bool(net["provider:physical_network"]) or
                          net_type in ('gre', 'vxlan'))
        if not carries_seg_id:
            continue
        bucket = used_seg_ids.setdefault(net_type, [])
        seg_id = net["provider:segmentation_id"]
        if seg_id is not None:
            bucket.append(seg_id)
    return used_seg_ids
def generate_new_segmentation_id(src_seg_ids, dst_seg_ids, network_type):
    """Generate new segmentation ID based on provided info with busy ones.

    Search for the lowest free segmentation ID. IDs '0' and '1' are reserved
    in most of network types, so start searching from '2'.

    For 'vlan' network type ID '4095' is the last one in available range and
    besides also reserved. Raise AbortMigrationError if reach this ID.

    :param src_seg_ids: Dictionary with busy segmentation IDs on SRC
    :param dst_seg_ids: Dictionary with busy segmentation IDs on DST
    :param network_type: Network type ('vlan', 'vxlan' or 'gre')
    :result int: New generated free segmentation ID
    """
    busy = (set(src_seg_ids.get(network_type, [])) |
            set(dst_seg_ids.get(network_type, [])))
    # IDs 0 and 1 are reserved, so candidates start at 2.
    candidate = 2
    while candidate in busy:
        candidate += 1
    if candidate >= 4095 and network_type == 'vlan':
        raise exception.AbortMigrationError("Segmentation IDs limit for 'vlan'"
                                            " network type has been exceeded")
    return candidate
| {
"content_hash": "b7947d65c25a85065195cd8dd31b7b2e",
"timestamp": "",
"source": "github",
"line_count": 1740,
"max_line_length": 79,
"avg_line_length": 41.76149425287356,
"alnum_prop": 0.5129704809743343,
"repo_name": "mgrygoriev/CloudFerry",
"id": "63386cf4b6f7222d0ffa8b5c29e8fcde54292072",
"size": "73241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferrylib/os/network/neutron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |
""" A script/util to upgrade all files in a directory
This is rather conservative in its approach, only copying/overwriting
new and unedited files.
To be used by "upgrade" feature.
"""
try:
from IPython.external.path import path
except ImportError:
from path import path
import md5,pickle
def showdiff(old,new):
import difflib
d = difflib.Differ()
lines = d.compare(old.lines(),new.lines())
realdiff = False
for l in lines:
print l,
if not realdiff and not l[0].isspace():
realdiff = True
return realdiff
def upgrade_dir(srcdir, tgtdir):
    """ Copy over all files in srcdir to tgtdir w/ native line endings

    Creates .upgrade_report in tgtdir that stores md5sums of all files
    to notice changed files b/w upgrades.

    Files whose checksum no longer matches the recorded one were edited
    by the user and are skipped (a diff is printed instead).

    NOTE: Python 2 only (print statement, `md5` module).
    """
    def pr(s):
        # tiny print helper (py2 print statement)
        print s

    # Patterns that are never copied or tracked.
    junk = ['.svn','ipythonrc*','*.pyc', '*.pyo', '*~', '.hg']
    def ignorable(p):
        for pat in junk:
            if p.startswith(pat) or p.fnmatch(pat):
                return True
        return False

    modded = []
    files = [path(srcdir).relpathto(p) for p in path(srcdir).walkfiles()]
    #print files
    rep = tgtdir / '.upgrade_report'
    try:
        rpt = pickle.load(rep.open())
    except:
        # Missing/corrupt report: start with an empty checksum table, so
        # existing files are treated as modified (conservative).
        rpt = {}
    for f in files:
        if ignorable(f):
            continue
        src = srcdir / f
        tgt = tgtdir / f
        if not tgt.isfile():
            # New file: copy it and record its checksum.
            pr("Creating %s" % str(tgt))
            tgt.write_text(src.text())
            rpt[str(tgt)] = md5.new(tgt.text()).hexdigest()
        else:
            cont = tgt.text()
            # ('sum' shadows the builtin -- kept as-is.)
            sum = rpt.get(str(tgt), None)
            #print sum
            if sum and md5.new(cont).hexdigest() == sum:
                # Checksum matches the recorded one: the user never
                # edited this file, so it is safe to overwrite.
                pr("%s: Unedited, installing new version" % tgt)
                tgt.write_text(src.text())
                rpt[str(tgt)] = md5.new(tgt.text()).hexdigest()
            else:
                pr(' == Modified, skipping %s, diffs below == ' % tgt)
                #rpt[str(tgt)] = md5.new(tgt.bytes()).hexdigest()
                real = showdiff(tgt,src)
                pr('') # empty line
                if not real:
                    pr("(Ok, it was identical, only upgrading checksum)")
                    rpt[str(tgt)] = md5.new(tgt.text()).hexdigest()
                else:
                    modded.append(tgt)

    #print rpt
    pickle.dump(rpt, rep.open('w'))
    if modded:
        print "\n\nDelete the following files manually (and rerun %upgrade)\nif you need a full upgrade:"
        for m in modded:
            print m
import sys

if __name__ == "__main__":
    # CLI usage: upgrade_dir.py <srcdir> <tgtdir>
    upgrade_dir(path(sys.argv[1]), path(sys.argv[2]))
| {
"content_hash": "54bb8d400eb17cef91af4d803d2d09f8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 105,
"avg_line_length": 29.34065934065934,
"alnum_prop": 0.5400749063670413,
"repo_name": "toomoresuch/pysonengine",
"id": "2f31e93174ab2b1afdef4baf719ecc2a108f63b9",
"size": "2692",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "eggs/ipython-0.10.1-py2.6.egg/IPython/upgrade_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "401941"
},
{
"name": "JavaScript",
"bytes": "628757"
},
{
"name": "Python",
"bytes": "12919662"
},
{
"name": "Shell",
"bytes": "416"
},
{
"name": "VimL",
"bytes": "4587"
}
],
"symlink_target": ""
} |
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
Decimation parameter calculator
Given an evaluation frequency, the following is calculated:
"""
import numpy as np
# utils
from utilsEvalFreq import *
from utilsIO import *
class DecimationParams(object):
    """Derives decimation levels/factors and per-level evaluation
    frequencies for a recording sampled at `fs` Hz.

    NOTE: Python 2 only (xrange).  `getEvaluationFreqSize`,
    `generalPrint`, `warningPrint` and `arrayToString` come from the
    star imports of utilsEvalFreq / utilsIO.
    """

    ###################
    ### CONSTRUCTOR
    ##################
    def __init__(self, fs):
        # data reader
        self.fs = fs
        # set a variable used for calculating from frequecy
        self.divFactor = 2
        # calculate some initial values based on default vals
        # 7 levels with 7 frequencies per level
        self.calcFrequencyParams(7, 7)

    ###################
    ### GET GENERAL INFO
    ##################
    def getSampleFreq(self):
        return self.fs

    def getSampleFreqLevel(self, declevel):
        # sampling frequency after decimating down to this level
        return self.fs/self.getDecFactor(declevel)

    def getNumLevels(self):
        return self.numLevels

    def getDecFactors(self):
        return self.decFactors

    def getDecFactor(self, decLevel):
        return self.decFactors[decLevel]

    def getIncrementalFactor(self, decLevel):
        # extra decimation applied going from level decLevel-1 to decLevel
        if decLevel == 0:
            return int(self.decFactors[decLevel])
        else:
            return int(self.decFactors[decLevel]/self.decFactors[decLevel-1])

    def getDecFrequencies(self):
        return self.decFrequencies

    def getEvalFrequencies(self):
        return self.evalFreq

    def getNumFreqPerLevel(self):
        return self.freqPerLevel

    def getEvalFrequenciesForLevel(self, level):
        return self.evalFreqPerLevel[level,:]

    def getEvalFrequenciesAllLevels(self):
        return self.evalFreqPerLevel

    ###################
    ### GET DECIMATED DATA
    ### FOR THIS LEVEL
    ##################
    def getData(self):
        # NOTE(review): self.data is never assigned in this class -- this
        # accessor only works if a subclass/caller sets it. Confirm.
        return self.data

    ###################
    ### SET FREQUENCIES
    ### PER LEVEL
    ##################
    def setFrequencyParams(self, evalFreq, freqPerLevel, maxLevel):
        # NOTE(review): calcDecimationParams takes (evalFreq, maxLevel,
        # freqPerLevel); here freqPerLevel is passed into the maxLevel
        # slot and vice versa. Looks like a swapped-arguments bug --
        # confirm against callers before changing.
        self.sortFreq(evalFreq)
        self.calcDecimationParams(evalFreq, freqPerLevel, maxLevel)

    def setDecimationParams(self, numLevels, freqPerLevel):
        self.calcFrequencyParams(numLevels, freqPerLevel)

    ###################
    ### CALCULATE DECIMATION LEVELS
    ### BASED ON AMOUNT OF DATA
    ##################
    def calcDecimationParams(self, evalFreq, maxLevel, freqPerLevel):
        """Group evaluation frequencies <= fs/4 into at most maxLevel
        levels of freqPerLevel each and compute the decimation factor and
        decimated frequency for every level (from the level's highest
        evaluation frequency)."""
        # in case list
        evalFreq = np.array(evalFreq)
        # calculating decimation parameters from evaluation frequencies
        maxf = self.fs/4
        # find the first evaluation frequency less than or equal to maxf
        fHigh = evalFreq[0]
        for ifreq in xrange(0, evalFreq.size):
            if evalFreq[ifreq] <= maxf:
                fHigh = evalFreq[ifreq]
                break
        iHigh = evalFreq.tolist().index(fHigh)
        evalFreqSub = evalFreq[iHigh:]
        # calculate number of levels
        numLevels = maxLevel
        # check if enough evaluation frequencies
        if len(evalFreqSub) < freqPerLevel*maxLevel:
            #numLevels = int(math.floor(len(evalFreqSub)/freqPerLevel))
            numLevels = int(math.ceil(1.0*len(evalFreqSub)/freqPerLevel))
        # do another subslice
        evalFreqSub = evalFreqSub[:numLevels*freqPerLevel]
        # now create an array of evalation frequencies per decimation level
        # evalFreqPerLevel = np.ones(shape=(numLevels, freqPerLevel))
        # unfilled slots stay at -1
        evalFreqPerLevel = np.ones(shape=(numLevels, freqPerLevel)) * -1
        for ilevel in xrange(0, numLevels):
            for ifreq in xrange(0, freqPerLevel):
                if ilevel*freqPerLevel + ifreq >= len(evalFreqSub):
                    break
                evalFreqPerLevel[ilevel, ifreq] = evalFreqSub[ilevel*freqPerLevel + ifreq]
        # now calculate decimation factors
        decFactors = np.ones(shape=(numLevels))
        decFrequencies = np.ones(shape=(numLevels))
        for ilevel in xrange(0, numLevels):
            decFactors[ilevel], decFrequencies[ilevel] = self.calcNearestFactor(evalFreqPerLevel[ilevel][0])
        # finally, set all parameters
        self.evalFreq = evalFreqSub
        self.freqPerLevel = freqPerLevel
        self.numLevels = numLevels
        self.evalFreqPerLevel = evalFreqPerLevel
        self.decFactors = decFactors
        self.decFrequencies = decFrequencies

    def calcFrequencyParams(self, numLevels, freqPerLevel):
        # calculating evaluation frequency parameters from decimation factors
        # takes number of decimation levels
        # takes number of frequencies per level
        # and assigns evaluation frequencies accordingly
        # along with decimation factors
        numFreq = numLevels*freqPerLevel
        evalFreq = getEvaluationFreqSize(self.fs, numFreq)
        self.calcDecimationParams(evalFreq, numLevels, freqPerLevel)

    def sortFreq(self, freq):
        # sorted in descending order
        # sort in place
        freq[::-1].sort()

    def calcNearestFactor(self, freq):
        """Return (factor, fs/factor): the largest power of divFactor by
        which fs can be decimated while the decimated rate stays above
        4*freq."""
        # want sampling frequency to be 4 times greater than highest freq
        fsMin = freq*4
        # set to initial sampling frequency
        f = float(self.fs)
        fac = 1
        # keep dividing while one more step would still leave f > fsMin
        while f > fsMin*self.divFactor:
            f = f/self.divFactor
            fac = fac*self.divFactor
        return fac, f

    ###################
    ### DEBUG
    ##################
    def printInfo(self):
        """Print a summary of every decimation level."""
        self.printText("####################")
        self.printText("DECIMATION PARAMETER INFO BEGIN")
        self.printText("####################")
        self.printText("Sampling frequency = {:f}".format(self.fs))
        self.printText("Number of decimation levels = {:d}".format(self.getNumLevels()))
        for il in xrange(0, self.getNumLevels()):
            self.printText("Level = {:d}\tsample freq. [Hz] = {:.6f}\tsample rate [s] = {:.6f}\tdec. factor = {:07d}\tinc. factor = {:d}".format(
                il, self.decFrequencies[il], 1.0/self.decFrequencies[il], int(self.decFactors[il]), self.getIncrementalFactor(il)))
        self.printText("####################")
        self.printText("DECIMATION PARAMETER INFO END")
        self.printText("####################")
        self.printEvalFreq()

    def printEvalFreq(self):
        """Print the evaluation frequencies for every level."""
        self.printText("####################")
        self.printText("DECIMATION PARAMETER FREQUENCIES BEGIN")
        self.printText("####################")
        self.printText("Evaluation frequencies [Hz]")
        for il in xrange(0, self.getNumLevels()):
            freqForLevel = self.getEvalFrequenciesForLevel(il)
            eFreqStr = arrayToString(freqForLevel)
            self.printText("Level = {:d}: {}".format(il, eFreqStr))
        self.printText("####################")
        self.printText("DECIMATION PARAMETER FREQUENCIES END")
        self.printText("####################")

    def printText(self, infoStr):
        generalPrint("Decimation Parameters Info", infoStr)

    def printWarning(self, warnStr):
        warningPrint("Decimation Parameters Warning", warnStr)
| {
"content_hash": "341190d64812cbc3928362c518608b2b",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 136,
"avg_line_length": 31.733333333333334,
"alnum_prop": 0.6914996767937944,
"repo_name": "nss350/magPy",
"id": "348b4449acf05d21fa9e8372366e0e38a850a96c",
"size": "6188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/decimationParameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "496897"
}
],
"symlink_target": ""
} |
"""Tests for rss widget serializer"""
import time
import pytest
from rest_framework.serializers import ValidationError
from open_discussions.test_utils import PickleableMock
from widgets.factories import WidgetInstanceFactory, WidgetListFactory
from widgets.serializers import rss
@pytest.mark.django_db
@pytest.mark.parametrize("raise_exception", [True, False])
@pytest.mark.parametrize("timestamp_key", ["published_parsed", "updated_parsed"])
@pytest.mark.parametrize("item_count", [0, 8, 15])
@pytest.mark.parametrize("display_limit", [0, 6, 10, 14, 18])
def test_url_widget_serialize(
    mocker, raise_exception, timestamp_key, item_count, display_limit
):
    """Tests that the rss widget serializes correctly"""
    # Synthetic feedparser-style entries, newest first.
    entries = sorted(
        [
            {
                "title": f"Title {idx}",
                "description": f"Description {idx}",
                "link": f"http://example.com/{idx}",
                timestamp_key: time.gmtime(),
            }
            for idx in range(item_count)
        ],
        reverse=True,
        key=lambda entry: entry[timestamp_key],
    )
    mock_parse = mocker.patch("feedparser.parse")
    if raise_exception:
        # A broken feed must degrade to an empty entry list, not error.
        mock_parse.side_effect = Exception("bad")
    else:
        mock_parse.return_value = PickleableMock(entries=entries)
    widget_instance = WidgetInstanceFactory.create(type_rss=True)
    widget_instance.configuration["feed_display_limit"] = display_limit
    widget_instance.save()

    data = rss.RssFeedWidgetSerializer(widget_instance).data

    mock_parse.assert_called_once_with(widget_instance.configuration["url"])
    # Entries are capped by both MAX_FEED_ITEMS and feed_display_limit.
    assert data == {
        "id": widget_instance.id,
        "widget_type": "RSS Feed",
        "title": widget_instance.title,
        "configuration": widget_instance.configuration,
        "json": {
            "title": widget_instance.title,
            "entries": []
            if raise_exception
            else [
                {
                    "title": entry["title"],
                    "description": entry["description"],
                    "link": entry["link"],
                    "timestamp": time.strftime(
                        "%Y-%m-%dT%H:%M:%SZ", entry[timestamp_key]
                    ),
                }
                for entry in entries[: min(rss.MAX_FEED_ITEMS, display_limit)]
            ],
        },
    }
@pytest.mark.django_db
@pytest.mark.parametrize("raise_exception", [True, False])
def test_url_widget_save(mocker, raise_exception):
    """Tests that saving an rss widget validates (parses) the feed URL"""
    widget_list = WidgetListFactory.create()
    url = "http://example.com"
    data = {
        "widget_list_id": widget_list.id,
        "title": "Title",
        "widget_type": "RSS Feed",
        "position": 1,
        "configuration": {"url": url},
    }
    mock_parse = mocker.patch("feedparser.parse")
    if raise_exception:
        mock_parse.side_effect = Exception("bad")
    else:
        mock_parse.return_value = PickleableMock(entries=[])
    serializer = rss.RssFeedWidgetSerializer(data=data)
    serializer.is_valid()
    if raise_exception:
        # An unparsable feed must fail with a ValidationError on save.
        with pytest.raises(ValidationError):
            serializer.save()
    else:
        serializer.save()
        mock_parse.assert_called_once_with(url)
| {
"content_hash": "71aeafe435eb261b8c0e97980a8b3f2f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 81,
"avg_line_length": 33.08080808080808,
"alnum_prop": 0.601526717557252,
"repo_name": "mitodl/open-discussions",
"id": "bfda131a7ad0397f79b533f64ce660490585e914",
"size": "3275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "widgets/serializers/rss_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
} |
import pandas as pd
import os
import numpy as np
import util
# from scipy.spatial.distance import cdist
def read_route_id_process_key(city):
    """Return column 4 (route-id process key) of the city_url.config row
    whose 'city' column matches `city`."""
    cfg_file = os.path.dirname(os.path.realpath(__file__)) + '/city_url.config'
    config = pd.read_csv(cfg_file)
    row = config[config['city'] == city].values[0]
    return row[4]
def read_city_url_from_config(city):
    """Return column 1 (URL) of the city_url.config row whose 'city'
    column matches `city`."""
    cfg_file = os.path.dirname(os.path.realpath(__file__)) + '/city_url.config'
    config = pd.read_csv(cfg_file)
    row = config[config['city'] == city].values[0]
    return row[1]
def read_city_code_from_config(lat, lng):
    """Return the 'city' code of the configured city nearest to the
    given coordinates.

    :param lat: latitude of the query point
    :param lng: longitude of the query point
    :return: value of the 'city' column of the nearest city_url.config row
    """
    config = pd.read_csv(os.path.dirname(os.path.realpath(__file__)) + '/city_url.config')
    # .values replaces DataFrame.as_matrix(), which was deprecated and
    # removed from pandas.
    lat_lng = config[['lat', 'lng']].values.astype(float)
    print(lat, lng)
    # calcValues=cdist([[lat, lng]],lat_lng)[0]
    calcValues = util.distance_calc([[lat, lng]], lat_lng)[0]
    city_index = np.argmin(calcValues, axis=0)
    # .iloc replaces the removed DataFrame.ix indexer (argmin yields a
    # positional index).
    city_code = config.iloc[city_index]['city']
    return city_code
def read_api_config(item):
    """Return column 1 of the api.config row whose 'item' column matches
    `item`."""
    cfg_file = os.path.dirname(os.path.realpath(__file__)) + '/api.config'
    config = pd.read_csv(cfg_file)
    matched = config[config['item'] == item]
    return matched.values[0][1]
if __name__ == "__main__":
    # Manual smoke test with fixed example coordinates.
    lat = 51.135494
    lng = -114.158389
    print(read_city_code_from_config(lat, lng))
| {
"content_hash": "2dd3ce9e2654e3126df444f2cb6dfacb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 90,
"avg_line_length": 29.925,
"alnum_prop": 0.6524644945697577,
"repo_name": "FiniteElementries/OneBus",
"id": "eb112cd9d97d39f157acf6afee1feaa214d4f442",
"size": "1197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Database/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3023"
},
{
"name": "HTML",
"bytes": "2009"
},
{
"name": "JavaScript",
"bytes": "7431"
},
{
"name": "Python",
"bytes": "16592"
}
],
"symlink_target": ""
} |
import shelve, time
def startup(addPluginHook, addHook, world) :
    """Register the `delignore` command with the bot's plugin system.

    The `4` and the argument-name list follow the addPluginHook API
    (presumably a permission/level and the handler's parameters --
    confirm against the plugin loader).
    """
    addPluginHook(world, "delignore", main, 4, ["self", "info", "args", "world"])
def main(self, info, args, world) :
    """delignore <hostmask>
    Removes a hostmask from the bot's ignore list"""
    hostmask = args[1].lower()
    if hostmask not in self.ignorelist :
        self.msg(info["channel"], "That hostmask is not on the ignore list.")
    else :
        self.ignorelist.remove(hostmask)
        self.msg(info["channel"], "Ignore removed.")
| {
"content_hash": "8e1066b365d3c62934bf69c06c665ada",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 44.90909090909091,
"alnum_prop": 0.6497975708502024,
"repo_name": "sonicrules1234/sonicbot",
"id": "cac9c340933b9a562002ee9bd1bbcde2ec4e0681",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/sonicbotv4",
"path": "plugins/delignore.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "3887"
},
{
"name": "Python",
"bytes": "650222"
}
],
"symlink_target": ""
} |
""" Implements a 2D-ray class - same as algebra.Ray2D, but without the
dependency from the algebra package.
A ray is an infinite line and is defined by the equation
y(x) = y0 + x * slope in a cartesian coordinate system
"""
__author__ = "mozman <mozman@gmx.at>"
import math
from dxfwrite.algebra import equals_almost, normalize_angle, is_vertical_angle
__all__ = ['Ray2D', 'ParallelRaysError']
class ParallelRaysError(ArithmeticError):
    """Raised when an operation (e.g. ray intersection) requires two
    non-parallel rays but the rays are parallel."""
    pass
# Frequently used angle constants (radians).
HALF_PI = math.pi / 2.
THREE_PI_HALF = 1.5 * math.pi
DOUBLE_PI = math.pi * 2.
# Indices of the x/y components in 2d point tuples.
XCOORD = 0
YCOORD = 1
class Ray2D(object):
    """defines an infinite ray (line with no end points)
    treat it as IMMUTABLE - don't change the status
    possible keyword args: slope, angle as float
        point1, point2 as 2d-tuples
    input case A: point1, point2
        ray goes through point1 and point2, vertical lines are possible
        ignores the keyword arguments slope and angle
    input case B: point1, slope
        ray goes through point1 with slope
        argument point2 have to be None
        vertical lines are not possible because slope can't be infinite.
        ignores the keyword argument angle
    input case C: point1, angle (in radian)
        argument point2 have to be None
        ray goes through point1 with the submitted angle
        vertical lines are possible
        if keyword argument slope is defined, angle will be ignored
    """
    def __init__(self, point1, point2=None, **kwargs):
        self._vertical = False
        # number of decimal places used by equals_almost() comparisons
        self.places = 7
        p1x = float(point1[XCOORD])
        p1y = float(point1[YCOORD])
        if point2 is not None: # case A
            # normalize point order to assure consist signs for slopes
            # +slope goes up and -slope goes down
            self._slope = 0
            self._angle = 0
            p2x = float(point2[XCOORD])
            p2y = float(point2[YCOORD])
            if p1x > p2x :
                p1x, p2x = p2x, p1x
                p1y, p2y = p2y, p1y
            dx = p2x - p1x
            dy = p2y - p1y
            if dx == 0. : # line is vertical
                # vertical rays are described by their x-coordinate only
                self._x = p1x
                self._set_angle(HALF_PI)
            else :
                self._set_slope(dy/dx)
        elif 'slope' in kwargs: # case B
            self._set_slope(float(kwargs['slope']))
        elif 'angle' in kwargs: # case C
            self._set_angle(normalize_angle(float(kwargs['angle'])))
            if self.is_vertical:
                self._x = p1x
        # NOTE(review): if neither point2 nor a 'slope'/'angle' keyword is
        # given, _slope/_angle are never set and later access fails - confirm
        # callers always supply one of the three cases.
        if not self.is_vertical:
            # y0 is the y-coordinate of this ray at x-coordinate == 0
            self._y0 = p1y - self.slope * p1x
    @property
    def slope(self):
        """ get slope of the ray """
        return self._slope
    def _set_slope(self, slope): # private
        # keep _slope and _angle consistent with each other
        self._slope = slope
        self._angle = normalize_angle(math.atan(slope))
    @property
    def angle(self):
        """ get angle of the ray in radians (normalized) """
        return self._angle
    def _set_angle(self, angle): # private
        # keep _angle, _slope and the vertical flag consistent
        self._angle = angle
        self._slope = math.tan(angle)
        self._vertical = is_vertical_angle(angle)
    @property
    def is_vertical(self):
        """ True if the ray is (nearly) vertical """
        return self._vertical
    @property
    def is_horizontal(self):
        """ True if the slope is (nearly) zero """
        return equals_almost(self.slope, 0., self.places)
    def is_parallel(self, ray):
        """ return True if the rays are parallel, else False"""
        if self.is_vertical:
            return ray.is_vertical
        else:
            return equals_almost(self.slope, ray.slope, self.places)
    def intersect(self, other_ray):
        """ returns the intersection point (xy-tuple) of self and
        other_ray; raises ParallelRaysError, if the rays are parallel"""
        ray1 = self
        ray2 = other_ray
        if not ray1.is_parallel(ray2):
            # at most one of the two rays can be vertical here
            if ray1.is_vertical:
                x = ray1._x
                y = ray2.get_y(x)
            elif ray2.is_vertical:
                x = ray2._x
                y = ray1.get_y(x)
            else :
                # calc intersection with the 'straight-line-equation'
                # based on y(x) = y0 + x*slope
                x = (ray1._y0 - ray2._y0)/(ray2.slope - ray1.slope)
                y = ray1.get_y(x)
            return (x, y)
        else:
            raise ParallelRaysError("no intersection, rays are parallel")
    def normal_through(self, point):
        """ returns a ray which is normal to self and goes through point"""
        return Ray2D(point, angle=self.angle+HALF_PI)
    def goes_through(self, point):
        """ returns True if ray goes through point, else False"""
        if self.is_vertical:
            return equals_almost(point[XCOORD], self._x, self.places)
        else :
            return equals_almost(point[YCOORD], self.get_y(point[XCOORD]),
                                 self.places)
    def get_y(self, x):
        """ get y by x, raises ArithmeticError for vertical lines"""
        if self.is_vertical:
            raise ArithmeticError
        return self._y0 + float(x) * self.slope
    def get_x(self, y):
        """ get x by y, raises ArithmeticError for horizontal lines"""
        if self.is_vertical :
            # every y lies on a vertical ray; x is constant
            return self._x
        else :
            if self.is_horizontal:
                raise ArithmeticError
            return (float(y) - self._y0) / self.slope
    def bisectrix(self, other_ray):
        """ bisectrix between self and other_ray """
        if self.is_parallel(other_ray):
            raise ParallelRaysError
        cross_point = self.intersect(other_ray)
        # average of the two (normalized) angles; this picks one of the two
        # possible bisectors
        alpha = (self.angle + other_ray.angle) / 2.0
        return Ray2D(cross_point, angle=alpha)
| {
"content_hash": "5ea8edcfcd5b07380563007759aefe84",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 78,
"avg_line_length": 33.25,
"alnum_prop": 0.5798424633011099,
"repo_name": "sbarton272/AcousticBarcodes-Explorations",
"id": "d4a12f66a9421bdd8a0284f317f7b92a1a17c981",
"size": "5840",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "barcodes/dxfwrite/dxfwrite/algebra/ray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "8313"
},
{
"name": "Python",
"bytes": "725409"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
def install(*packages):
    """Placeholder installer for yum packages; accepts any package names
    and performs no action."""
    return None
def update():
    """Placeholder for a yum package-index update; performs no action."""
    return None
| {
"content_hash": "9e16a9026299e01d11ee66efef3b6b8a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 23,
"avg_line_length": 9.666666666666666,
"alnum_prop": 0.603448275862069,
"repo_name": "hatchery/genepool",
"id": "2db5cd0a5ac700b62288074a8ed24e348cfd5226",
"size": "58",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "genes/yum/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55184"
}
],
"symlink_target": ""
} |
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'ejms$zju+h!=01@(8liqhuzw=%3_$w#h93k1)ix42w^ari9yt5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only acceptable while DEBUG is True; list real hostnames for
# production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# SQLite file in the project root; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "2b62ac54a92a775ab511bf44dba5cd88",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 25.774509803921568,
"alnum_prop": 0.6903765690376569,
"repo_name": "postfix/viper-1",
"id": "c00cfb0dc35d76209dd19ad566118f24ecf8eeb2",
"size": "2629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1306"
},
{
"name": "JavaScript",
"bytes": "9295"
},
{
"name": "Python",
"bytes": "1392052"
},
{
"name": "Smarty",
"bytes": "28210"
}
],
"symlink_target": ""
} |
import socket, traceback, sys, copy, re, time, threading, select
from datetime import datetime, timedelta
import apsw # See README.md
from pubsub import pub # See README.md
from libfap import * # See README.md
import settings
############################
# Classes
############################
class clsBakerMessage():
    """One APRS message normalized for Baker's queues and database.

    Wraps a single parsed message (received from APRS-IS or built locally):
    uppercases the callsigns, sanitizes quotes out of the text, and attaches
    the queue bookkeeping fields (key, type, send counters/delays).
    """
    def __init__(self, src, dest, msg, msgid, msgack, orig_packet, dtarrival, key=''):
        try:
            self.src = src.upper()
            self.dest = dest.upper()
            self.msg = msg.replace("'","?").replace('"','?') # replace single and double quotes with question mark
            self.msgid = msgid
            self.msgack = msgack # To match msgack in ReceiveQ to item sent from Send Q, use key
            # Item in SendQ with key is waiting for msgack in Receive Q where SendQ.key (SendQ.src + SendQ.msgid_new) = ReceiveQ[item].dest + ReceiveQ[item].msg[3:]
            self.orig_packet = orig_packet
            # keep a unicode copy of the raw packet for DB storage
            self.aprspacket = unicode(orig_packet, 'ISO-8859-1')
            self.dtarrival = dtarrival
            # sentinel "never sent" timestamp
            self.dtsent = datetime(2000, 1, 1, 0, 0, 0)
            # last 5 digits of the current microsecond clock; used as the
            # message id when Baker sends command-response packets
            self.msgid_new = str(int(round(time.time()*10**6)))[-5:] # Not always used, for sending Baker Command Response Packets to client
            self.key = self.src + self.msgid_new
            self.type = ' '
            self.dtfirstsent = datetime(2000, 1, 1, 0, 0, 0)
            self.snddelays = settings.SEND_PACKETS_DELAY
            self.sndcnt = 0
            self.isValidBakerMessage = True
            if debuglevel > 0:
                print('%s APRS-IS > Baker Packet - [src, dest, msg, msgid, msgid_new, msgack, key, type, arrival - [%s, %s, %s, %s, %s, %s, %s, %s, %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.src, self.dest, self.msg, self.msgid, self.msgid_new, self.msgack, self.key, self.type, self.dtarrival) )
        except Exception as error:
            # any construction failure marks the message invalid instead of
            # propagating the exception
            print ('exception clsBakerMessage')
            traceback.print_exc()
            self.isValidBakerMessage = False
class clsBakerPacket():
    """Validates one libfap-parsed APRS packet and dispatches it.

    Valid message packets are wrapped in a clsBakerMessage, persisted via the
    'RecSaveDB' pubsub topic, then routed (ACK and/or Baker command handling)
    by dispatchMessage().
    """
    def __init__(self, pkt):
        try:
            # Is there a packet?
            # Check validity of packet
            self.isValidBakerPacket = False
            # NOTE(review): this tests index 1, but the comment below says
            # "first char" - confirm against the raw packet framing.
            if pkt[0].orig_packet.find('#') == 1:
                # This is a keep alive message from APRS-IS server (APRS Packet with first char of '#') ignore it
                if debuglevel > 0:
                    print ('%s APRS-IS > non-message - [keep-alive from APRS Server]' % (datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')) )
                self.isValidBakerPacket = False
            elif pkt[0].destination == None:
                # This is a non valid Message Packet
                if debuglevel > 1:
                    print ('%s APRS-IS > message - [packet,%s]' % (datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S'), pkt[0].orig_packet.strip('\r\n')))
                    print ('%s APRS-IS > message - [non-message packet]\n' % (datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S') ))
                    print ('%s APRS-IS > non-message - []' % (datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')) )
                self.isValidBakerPacket = False
            else:
                # This is a valid message packet
                if pkt[0].message:
                    # copy the libfap fields we care about
                    self.dest = pkt[0].destination
                    self.src = pkt[0].src_callsign
                    self.orig_packet = pkt[0].orig_packet
                    self.msg = pkt[0].message
                    self.msgid = pkt[0].message_id
                    self.msgack = pkt[0].message_ack
                    self.dst = pkt[0].dst_callsign
                    self.path = pkt[0].path
                    self.comment = pkt[0].comment
                    if (self.msg.find('{') > 1 and self.msg.find('}') > 1 and self.msgid == None):
                        # Work around libfap shortcomings
                        # APRS "REPLY-ACK" is not handled by libfap, must be adjusted here until libfap fixes bug, APRSIS32 is only one found to have this anomaly as of 11/2015
                        # order is important: extract the id before trimming the text
                        self.msgid = self.msg[self.msg.find('{') + 1:]
                        self.msg = self.msg[:self.msg.find('{')]
                    self.isValidBakerPacket = True
            if self.isValidBakerPacket == True:
                # Create Baker Message
                self.BkrMsg = clsBakerMessage(self.src, self.dest, self.msg, self.msgid, self.msgack, self.orig_packet, datetime.now())
                # Save msg to DB
                pub.sendMessage('RecSaveDB', arg1=self.BkrMsg)
                # Dispatch Baker Message (a copy, so queue handlers can mutate freely)
                bmsgcopy = copy.copy(self.BkrMsg)
                self.dispatchMessage(bmsgcopy)
            #else:
            # print('%s APRS-IS > invalid packet - [Houston, we have a problem' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')) )
        except Exception, err:
            print ('exception clsBakerPacket')
            traceback.print_exc()
            print ('pkt[0] - %s' % pkt[0])
            self.isValidBakerPacket = False
    def dispatchMessage(self, bmsg):
        """Route a received message: messages carrying a msgid get ACKed and
        treated as Baker commands; messages carrying a msgack get matched
        against the SendQ."""
        #bmsg = arg1
        try:
            # Either ACK it and push it into the Baker Command Queue or send it to the SendQ to be matched
            if bmsg.msgid:
                # Send to SendQ so an ack will be delivered
                bmsg.type = 'Need2ACK'
                bmsgcopy1 = copy.copy(bmsg)
                pub.sendMessage('Need2ACK', arg1=bmsgcopy1)
                # Process Baker Command
                bmsgcopy2 = copy.copy(bmsg)
                bmsgcopy2.type = 'BakerCmdResponse'
                pub.sendMessage('NewBakerCmd', arg1=bmsgcopy2)
            elif bmsg.msgack:
                # This packet is rec'd from the client. It is a response to any Baker Message sent to the client.
                # It needs to be matched with the original Baker message in the SendQ, otherwise the SendQ will send another out.
                bmsg.type = 'MsgACK'
                bmsgcopy = copy.copy(bmsg)
                pub.sendMessage('MsgACK', arg1=bmsgcopy)
        finally:
            pass
class clsBakerCommand():
    """Parses comma-separated Baker commands out of incoming messages.

    The command table comes from settings.BAKER_COMMANDS; each entry maps a
    command keyword to (pubsub topic, expected argument count, description).
    """
    def __init__(self):
        pub.subscribe(self.NewBakerCmd,'NewBakerCmd')
        self.bkrCmds = settings.BAKER_COMMANDS
        self.lstCmd = ()
    def NewBakerCmd(self, arg1):
        """Handler for 'NewBakerCmd': split the message text on commas and,
        when the keyword and argument count match the command table, forward
        the message to that command's pubsub topic."""
        bmsg = arg1
        self.lstCmd = bmsg.msg.split(',')
        cmdKey = self.lstCmd[0]
        if cmdKey in self.bkrCmds:
            if debuglevel > 1:
                print 'valid baker command - ', self.bkrCmds[cmdKey][0], self.bkrCmds[cmdKey][1], self.bkrCmds[cmdKey][2]
            # argument count must match exactly, otherwise the command is dropped
            if len(self.lstCmd) == self.bkrCmds[cmdKey][1]:
                if debuglevel > 0:
                    print ('%s %s - A [%s, from %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'),'APRS IS > Baker Command ', bmsg.msg, bmsg.src))
                pub.sendMessage(self.bkrCmds[cmdKey][0], arg1=bmsg, arg2=self.lstCmd)
        else:
            if debuglevel > 0:
                print ('%s %s - A [%s, %s, %s, %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'),'APRS IS > Baker Command N/A', bmsg.src, bmsg.dest, bmsg.msg, bmsg.msgid))
class clsAPRSConnection():
    """TCP connection to the APRS-IS server.

    Opens the socket, performs the user/pass login, consumes the server's
    greeting lines, and exposes PacketSend() (subscribed to the 'PacketSend'
    pubsub topic) for writing raw packets back to the server.
    """
    def __init__(self):
        try:
            # Create socket & connect
            self.connected = False
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((settings.APRS_SERVER_HOST, settings.APRS_SERVER_PORT))
            self.name = 'clsAPRSConnection'
            pub.subscribe(self.PacketSend, 'PacketSend')
            # Logon to APRS-IS Server
            # NOTE(review): the login line is sent without a trailing
            # newline - confirm the server accepts it this way.
            login = 'user %s pass %s vers BAKER V.01_01/12/2016 %s ' % (settings.APRS_USER, settings.APRS_PASSCODE, settings.FILTER_DETAILS)
            self.sock.send(login)
            self.sock_file = self.sock.makefile()
            libfap.fap_init()
            # handle initial response by aprs-is server
            packet_str = self.sock_file.readline()
            packet = libfap.fap_parseaprs(packet_str, len(packet_str), 0)
            print ('\n%s - APRS-IS Server > login greeting [%s %s]\n' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), packet[0].orig_packet.strip('\r\n'), packet[0].orig_packet.strip('\r\n') ))
            libfap.fap_free(packet)
            # consume (and discard) the second greeting line
            packet_str = self.sock_file.readline()
            packet = libfap.fap_parseaprs(packet_str, len(packet_str), 0)
            libfap.fap_free(packet)
            self.connected = True
        except Exception, err:
            print ('exception in clsAPRSConnection')
            traceback.print_exc()
            # close socket
            self.connected = False
            if self.sock:
                self.sock.shutdown(0)
                self.sock.close()
    def PacketSend(self, arg1):
        """Handler for 'PacketSend': write one raw packet (plus CRLF) to the
        APRS-IS socket and flush immediately."""
        pkt2Send = arg1
        try:
            self.sock_file.write(pkt2Send + '\r\n')
            self.sock_file.flush()
            if debuglevel > 0:
                print "pkt2Send - ", pkt2Send
        except Exception as error:
            print('exception in clsAPRSConnection.PacketSend')
            print ('sys.exc_info()[0] - ',sys.exc_info())
class clsBakerDB():
    """SQLite persistence layer (via APSW) for Baker.

    Subscribes to pubsub topics so other components can save received/sent
    packets, record event check-ins, and run the report-1 query without
    holding a DB handle themselves.
    """
    def __init__(self):
        try:
            if debuglevel > 1:
                print "\nUsing APSW file",apsw.__file__ # from the extension module
                print "APSW version",apsw.apswversion() # from the extension module
                print "SQLite lib version",apsw.sqlitelibversion() # from the sqlite library code
                print "SQLite header version",apsw.SQLITE_VERSION_NUMBER # from the sqlite header file at compile time
            self.dbcon = apsw.Connection(settings.BAKER_DB)
            self.lastrowid = 0
            self.seconds = 6 # time to sleep before final close of DB
            pub.subscribe(self.close, 'ShutdownDB')
            pub.subscribe(self.bmRecSave, 'RecSaveDB')
            pub.subscribe(self.bmSendSave, 'SendSaveDB')
            pub.subscribe(self.BakerCmdInsertRunner, 'BakerCmdInsertRunner')
            pub.subscribe(self.BakerCmdReport1, 'BakerCmdReport1')
        except apsw.Error, e:
            print "APSW error - all args:", e.args
            self.dbcon = False
            print "Error - clsBakerDB %s:" % e.args[0]
            # NOTE(review): dbcon was just set to False, so this close() is
            # unreachable - confirm the intended cleanup order.
            if self.dbcon:
                self.dbcon.close()
    def bmRecSave(self, arg1):
        """Handler for 'RecSaveDB': insert one received packet into the
        baker_packets_recd table."""
        #save rec'd Baker Packet to DB Table
        bmsg = arg1
        data = (bmsg.dest, bmsg.src, bmsg.msg, bmsg.msgid, bmsg.msgack, bmsg.aprspacket, str(bmsg.dtarrival))
        if self.dbcon:
            cur = self.dbcon.cursor()
            try:
                #insert into db table
                cnt = cur.execute ("insert into baker_packets_recd (dest, src, msg, msgid, msgack, aprspacket, dtarrival) values (?,?,?,?,?,?,?)", data)
                cnt = self.dbcon.last_insert_rowid()
                if cnt > 0:
                    self.lastrowid = self.dbcon.last_insert_rowid()
                if debuglevel > 1:
                    cur.execute('SELECT * from baker_packets_recd order by id desc limit 10')
                    rows = cur.fetchall()
                    print 'Last 10 Baker Packets Sent'
                    for row in rows:
                        print row
            except apsw.Error as error:
                print "SQLite error at bmRecSave - all args:", error, error.args
                self.bmRecSave = False
            except Exception as error:
                print ('exception clsBakerDB.bmRecSave')
                traceback.print_exc()
                print ('sys.exc_info()[0] - ',sys.exc_info()[0])
        else:
            print 'db connection is down'
    def BakerCmdInsertRunner(self, arg1, arg2):
        """Handler for 'BakerCmdInsertRunner': upsert a runner check-in/out
        row in baker_events keyed on (station, competitor)."""
        bmsg = arg1
        lstbcmd = arg2
        if self.dbcon:
            cur = self.dbcon.cursor()
            try:
                # find existing record first, if there is not one then insert it
                data = (lstbcmd[1], lstbcmd[2])
                cnt = 0
                for id, dest, src, station, competitor, dtin, dtout, comment in (cur.execute ("select id, dest, src, station, competitor, dtin, dtout, comment from baker_events where station = ? and competitor = ? limit 1", data)):
                    cnt = 1
                    # The record exists, so change the values and update it
                    newdtin = str(lstbcmd[3])
                    newdtout = str(lstbcmd[4])
                    newcomment = lstbcmd[5]
                    # NOTE(review): tmpcomment is only initialized in the first
                    # branch; if newdtin == dtin but a later branch runs, this
                    # raises UnboundLocalError - confirm intended behavior.
                    if newdtin <> dtin:
                        tmpcomment = "Prev In:" + str(dtin)
                    if newdtout <> dtout:
                        tmpcomment = tmpcomment + " Prev Out:" + str(dtout)
                    if newcomment <> comment:
                        tmpcomment = newcomment + " " + tmpcomment
                    if tmpcomment:
                        newestcomment = tmpcomment + " " + comment
                    # NOTE(review): UPDATE ... LIMIT requires SQLite built with
                    # SQLITE_ENABLE_UPDATE_DELETE_LIMIT - verify the build.
                    data = (BakerCommon.epoch2iso8601time(newdtin), BakerCommon.epoch2iso8601time(newdtout), newestcomment, lstbcmd[1], lstbcmd[2])
                    cur.execute("update baker_events set dtin = ?, dtout = ?, comment = ? where station = ? and competitor = ? limit 1", data)
                if cnt == 0:
                    # The record does not exist so insert it
                    data = (bmsg.dest, bmsg.src, lstbcmd[1], lstbcmd[2], BakerCommon.epoch2iso8601time(lstbcmd[3]), BakerCommon.epoch2iso8601time(lstbcmd[4]), lstbcmd[5])
                    rows = cur.execute ("insert into baker_events (dest, src, station, competitor, dtin, dtout, comment) values (?,?,?,?,?,?,?)", data)
                    print 'inserted new row - ', data
                if debuglevel > 0:
                    print 'Last 10 Baker Commands Received'
                    cur.setrowtrace(self.rowtrace)
                    for row in cur.execute('SELECT dest, src, station, competitor, dtin, dtout, comment from baker_events order by id desc limit 10'):
                        pass
                    cur.setrowtrace(None)
            except apsw.Error as error:
                print "SQLite error at bmRecSave - all args:", error, error.args
                self.bmRecSave = False
            except Exception as error:
                print ('exception clsBakerDB.bmRecSave')
                traceback.print_exc()
                print ('sys.exc_info()[0] - ',sys.exc_info()[0])
        else:
            print 'db connection is down'
    def BakerCmdReport1(self, arg1, arg2):
        """Handler for 'BakerCmdReport1': look up a (station, competitor) row
        and queue an 'r1' response message back to the requesting client."""
        bmsg = arg1
        lstbcmd = arg2
        if debuglevel > 0:
            print 'Baker Command List - ', lstbcmd
        if self.dbcon:
            cur = self.dbcon.cursor()
            try:
                # find existing record first, if there is not one then insert it
                data = (lstbcmd[1], lstbcmd[2])
                cnt = 0
                for id, dest, src, station, competitor, dtin, dtout, comment in (cur.execute ("select id, dest, src, station, competitor, dtin, dtout, comment from baker_events where station = ? and competitor = ? ", data)):
                    cnt = 1
                    # The record exists; build the r1 reply (note src/dest are
                    # swapped so the reply goes back to the requester)
                    msgnew = "r1".encode('utf-8') + "," + station.encode('utf-8') + "," + competitor.encode('utf-8') + "," + BakerCommon.iso86012epochtime(dtin.encode('utf-8')) + "," + BakerCommon.iso86012epochtime(dtout.encode('utf-8')) + "," + comment.encode('utf-8')
                    bmsgnew = clsBakerMessage(bmsg.dest, bmsg.src, msgnew, None, None, "Internal", datetime.now())
                    bmsgnew.type = 'BakerCmdResponse'
                    pub.sendMessage('Add2SendQ',arg1=bmsgnew)
                if cnt == 0:
                    # The record does not exist
                    if debuglevel > 1:
                        print 'BakerCmdReport1, no result'
                if debuglevel > 1:
                    print 'Last 10 Baker Packets Sent'
                    cur.setrowtrace(self.rowtrace)
                    for row in cur.execute('SELECT dest, src, station, competitor, dtin, dtout, comment from baker_events order by id desc limit 10'):
                        pass
                    cur.setrowtrace(None)
            except apsw.Error as error:
                print "SQLite error at bmRecSave - all args:", error, error.args
                self.bmRecSave = False
            except Exception as error:
                print ('exception clsBakerDB.bmRecSave')
                traceback.print_exc()
                print ('sys.exc_info()[0] - ',sys.exc_info()[0])
        else:
            print 'db connection is down'
    def rowtrace(self, cursor, row):
        """Called with each row of results before they are handed off. You can return None to
        cause the row to be skipped or a different set of values to return"""
        print "Row:", row
        return row
    def bmSendSave(self, arg1):
        """Handler for 'SendSaveDB': insert a sent packet into
        baker_packets_sent, or update its send-count if already recorded
        (matched on msgid_new)."""
        #save sent Baker Packet to DB Table
        bmsg = arg1
        self.saved_valid_message = False
        cur = self.dbcon.cursor()
        try:
            # Find existing record (using bmsg)
            data = (bmsg.msgid_new)
            rows = list(cur.execute ("select id from baker_packets_sent where msgid_new = ?", [data]))
            cnt = len(rows)
            if cnt == 0:
                # Insert new record
                data = (bmsg.dest, bmsg.src, bmsg.msg, bmsg.msgid, bmsg.msgack, bmsg.msgid_new, bmsg.aprspacket, bmsg.sndcnt, str(bmsg.dtsent))
                rows = list(cur.execute ("insert into baker_packets_sent (dest, src, msg, msgid, msgack, msgid_new, aprspacket, sndcnt, dtsent) values (?,?,?,?,?,?,?,?,?)", data))
                cnt = len(rows)
            else:
                # Update existing record
                data = (bmsg.sndcnt, bmsg.msgid_new)
                rows = list(cur.execute ("update baker_packets_sent set sndcnt = ? where msgid_new = ?", data))
            if cnt > 0:
                self.saved_valid_message = True
            if debuglevel > 1:
                rows = list(cur.execute('SELECT * from baker_packets_sent order by id desc limit 10'))
                print 'Last 10 Baker Packets Sent'
                for row in rows:
                    print row
        except apsw.Error as error:
            print "SQLite error at bmSendSave - all args:", error, error.args
            self.bmSendSave = False
        except Exception as error:
            print ('exception clsBakerDB')
            traceback.print_exc()
            print ('sys.exc_info()[0] - ',sys.exc_info()[0])
    def close(self):
        """Handler for 'ShutdownDB': close the database connection."""
        if self.dbcon:
            self.dbcon.close()
        print('%s %s - Terminating' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), 'DB Connection'))
class thrdSendPacketQ(threading.Thread):
    """Worker thread that owns the outgoing-packet queue (a dict keyed by
    message key).

    Every `seconds` it walks the queue and: sends pending ACKs once, matches
    incoming ACKs against waiting entries, and (re)sends command responses on
    the settings-defined retry schedule.  Other components feed the queue via
    the subscribed pubsub topics.
    """
    def __init__(self, name, seconds, que):
        threading.Thread.__init__(self)
        self.name = name
        self.seconds = seconds
        self.que = que
        self.exit = False
        pub.subscribe(self.close, 'Shutdown')
        pub.subscribe(self.Add2SendQ, 'Add2SendQ')
        pub.subscribe(self.Need2ACK, 'Need2ACK')
        pub.subscribe(self.SendTestPacket, 'SendTestPacket')
        pub.subscribe(self.MsgACK, 'MsgACK')
    def run(self):
        """Main loop: sleep, log the queue size, process the queue, repeat
        until close() sets the exit flag."""
        #global intExitFlag
        print ('%s %s - Starting' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
        self.thread_ident = threading.current_thread().ident
        while not self.exit:
            time.sleep(self.seconds)
            if debuglevel > 0:
                print ('%s %s - Count %s' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'),'Baker - SendQ ', len(self.que)))
            if self.que:
                self.CheckAndProcessQ()
        print('%s %s - Terminating' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
        return
    def CheckAndProcessQ(self):
        """One pass over the queue: send ACKs, match MsgACKs, and retry
        BakerCmdResponse packets; entries that are finished are deleted
        after the pass."""
        # NOTE(review): the queue dict may be mutated by pubsub handlers on
        # other threads while this pass runs - confirm thread-safety.
        lDelete = []
        for k, bmsg in self.que.items():
            if debuglevel > 0:
                print('%s APRS-IS > Baker Send Q item - [src, dest, msg, msgid, msgid_new, msgack, key, type, arrival - [%s, %s, %s, %s, %s, %s, %s, %s, %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), bmsg.src, bmsg.dest, bmsg.msg, bmsg.msgid, bmsg.msgid_new, bmsg.msgack, bmsg.key, bmsg.type, bmsg.dtarrival) )
        for k, bmsg in self.que.items():
            if bmsg.type == 'Need2ACK':
                # Send ACK to this message, only send it once. If they do not receive this ACK, they will request another APRS Packet
                pub.sendMessage('PacketSend', arg1=bmsg.aprspacket)
                # Save msg to DB
                bmsgcopy = copy.copy(bmsg)
                pub.sendMessage('SendSaveDB', arg1=bmsgcopy)
                if debuglevel > 0:
                    print("%s APRS-IS < ACK [%s]" % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), bmsg.aprspacket))
                lDelete.append(bmsg.key)
            elif bmsg.type == 'MsgACK':
                # Match and Delete
                lDelete.append(bmsg.key)
                print('%s %s - ACK Matched - [key - %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name, bmsg.key))
            elif bmsg.type == 'BakerCmdResponse':
                # Send Baker Command Response Packets to requesting clients
                dtnow = datetime.now()
                # Only send when current time is greater than datefirstsent + snddelays(n)
                if (bmsg.sndcnt == 0) or (dtnow > bmsg.dtfirstsent + timedelta(seconds=bmsg.snddelays[bmsg.sndcnt])):
                    if bmsg.sndcnt == 0:
                        bmsg.dtfirstsent = dtnow
                    if debuglevel > 0:
                        print ('%s %s - Attempt %s/%s [%s, %s, %s, %s, %s]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'),'APRS IS < Send Baker Packet', bmsg.sndcnt + 1, len(bmsg.snddelays) ,bmsg.src, bmsg.dest, bmsg.msg, bmsg.msgid, bmsg.dtarrival))
                    # build the raw APRS message packet with Baker's own msgid
                    bmsg.aprspacket = ("%s>APZ009,TCPIP*::%s:%s%s" % (bmsg.src.strip(), '{0: <9}'.format(bmsg.dest), bmsg.msg + '{', bmsg.msgid_new))
                    bmsg.dtsent = datetime.now()
                    bmsg.sndcnt += 1
                    pub.sendMessage('PacketSend', arg1=bmsg.aprspacket)
                    # Save msg to DB
                    bmsgcopy = copy.copy(bmsg)
                    pub.sendMessage('SendSaveDB', arg1=bmsgcopy)
                    # Delete from Q - tried to send the max number of times
                    if bmsg.sndcnt == len(bmsg.snddelays):
                        lDelete.append(bmsg.key)
                    if debuglevel > 0:
                        print("%s APRS-IS < Baker Response [%s]" % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), bmsg.aprspacket))
        # Delete items in sendQ as found in lDelete
        for key in lDelete:
            del self.que[key]
    def Add2SendQ(self, arg1):
        """Handler for 'Add2SendQ': store the message in the queue under its key."""
        bmsg = arg1
        self.que[bmsg.key] = bmsg
    def Need2ACK(self, arg1):
        """Handler for 'Need2ACK': build a sendable ACK message for a
        received message and queue it."""
        # Ack Rec'd Message - convert bmsg from rec'd to sendable
        bmsg = arg1
        bmsgnew = clsBakerMessage(bmsg.src, bmsg.dest, 'ack' + bmsg.msgid, None,None, bmsg.orig_packet, bmsg.dtarrival)
        bmsgnew.type = 'Need2ACK'
        bmsgnew.dtfirstsent = datetime.now()
        bmsgnew.aprspacket = ("%s>APZ009,TCPIP*::%s:%s" % (bmsgnew.dest.strip(), '{0: <9}'.format(bmsgnew.src), bmsgnew.msg)) #ACK only
        # NOTE(review): this copies the *received* message's sentinel
        # dtfirstsent into dtsent - confirm bmsgnew.dtfirstsent wasn't meant.
        bmsgnew.dtsent = bmsg.dtfirstsent
        bmsgnew.sndcnt = 1
        self.Add2SendQ(bmsgnew)
    def MsgACK(self, arg1):
        """Handler for 'MsgACK': rewrite the key so it matches the waiting
        SendQ entry (dest + acked msgid) and queue it for matching."""
        # Create key to match MsgACK Baker Packet to existing Baker Packet in SendQ. Send to SendQ
        bmsg = arg1
        bmsg.key = bmsg.dest + bmsg.msg[3:]
        self.Add2SendQ(bmsg)
    def SendTestPacket(self, arg1):
        """Handler for 'SendTestPacket': queue a canned test message to the
        configured test client."""
        src1 = ''.join([c for c in settings.FILTER_DETAILS if c.isupper()])
        dest1 = '{0: <9}'.format(settings.TEST_CLIENT)
        bmsg = clsBakerMessage( src1, dest1, 'BAKER APRS-IS Messaging - Providence Marathon', None, None, 'Key of no key' , datetime.now())
        bmsg.type = 'BakerCmdResponse'
        self.Add2SendQ(bmsg)
        if debuglevel > 0:
            print ('%s %s - [yyy%s, %s, %s, %s, %s, %syyy]' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'),'APRS IS < Test Packet', bmsg.src, bmsg.dest, bmsg.msg, bmsg.msgid, bmsg.msgid_new, bmsg.dtarrival))
    def close(self):
        """Handler for 'Shutdown': ask the run loop to exit."""
        self.exit = True
class thrdKeyboardPollerChars(threading.Thread):
    """Worker thread polling stdin for single-character commands:
    't' sends a test packet, 'q' shuts Baker down, 'd' cycles the global
    debug level (0-3)."""
    def __init__(self, name, seconds):
        threading.Thread.__init__(self)
        self.seconds = seconds
        self.name = name
        self.exit = False
        pub.subscribe(self.close, 'Shutdown')
    def run(self) :
        """Poll stdin with select() every `seconds` and dispatch commands
        until close() sets the exit flag."""
        global debuglevel
        print ('%s %s - Starting' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
        while not self.exit:
            i, o, e = select.select([sys.stdin], [], [], self.seconds)
            if i:
                ch = sys.stdin.read(1)
                # swallow the trailing newline after the command character
                ignore = sys.stdin.read(1)
                print('Keyboard Input - %s' % ch.strip('\r\n'))
                if ch == 't': # Send Test packet(s)
                    pub.sendMessage('SendTestPacket', arg1='TestPacket')
                if ch == 'q': # Close Baker Down
                    #intExitFlag = 1
                    pub.sendMessage('Shutdown')
                if ch == 'd': # Debug Level
                    debuglevel = debuglevel + 1
                    if debuglevel == 4: debuglevel = 0
                    print (" debuglevel - %s" % debuglevel)
        print('%s %s - Terminating' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
        return
    def close(self):
        """Handler for 'Shutdown': ask the run loop to exit."""
        self.exit = True
class thrdAPRSReadPackets( threading.Thread ) :
    """Worker thread that reads raw packets from the APRS-IS socket, parses
    them with libfap, and hands valid ones to clsBakerPacket.  On exit it
    tears down libfap, closes the socket, and triggers 'ShutdownDB'."""
    def __init__(self, name, APRSConn):
        threading.Thread.__init__(self)
        self.name = name
        self.APRSConn = APRSConn
        self.exit = False
        pub.subscribe(self.close, 'Shutdown')
    def run(self) :
        print ('%s %s - Starting' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
        self.thread_ident = threading.current_thread().ident
        try:
            # Watch APRS Connection for incoming packets
            # NOTE(review): this reads the module-global APRSConn, not
            # self.APRSConn - works only because main assigns that global.
            while APRSConn.connected and not self.exit:
                # Get next APRSPacket, process it, wait for next one
                rawAPRSPacket = self.APRSConn.sock_file.readline()
                if rawAPRSPacket:
                    APRSPacket = libfap.fap_parseaprs(rawAPRSPacket, len(rawAPRSPacket), 0)
                    # Convert an APRS Packet to a Baker Packet
                    if APRSPacket and APRSPacket[0].orig_packet:
                        BPkt = clsBakerPacket(APRSPacket)
                    self.APRSConn.sock_file.flush()
                    libfap.fap_free(APRSPacket)
        except KeyboardInterrupt:
            print ('\n%s - APRS-IS Server > Connection terminated from keyboard []\n' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')))
            self.exit = True
        finally:
            # always release libfap and the socket, then shut the DB down
            libfap.fap_cleanup()
            self.APRSConn.sock.shutdown(0)
            self.APRSConn.sock.close()
            print('%s %s - Terminating' % (datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'), self.name))
            pub.sendMessage('ShutdownDB')
    def close(self):
        """Handler for 'Shutdown': ask the run loop to exit."""
        self.exit = True
class BakerCommon(object):
    """Stateless conversion helpers shared across Baker components."""
    @classmethod
    def epoch2iso8601time(cls, epochSeconds):
        """Convert a 10-digit epoch-seconds string to 'YYYY-MM-DD HH:MM:SS';
        other strings are returned unchanged."""
        try:
            if isinstance(epochSeconds, str):
                if len(epochSeconds) <> 10:
                    return epochSeconds
                elif len(epochSeconds) == 10 and eval(epochSeconds) :
                    # NOTE(review): eval() on message-derived text is unsafe;
                    # int(epochSeconds) would do - flagged, not changed here.
                    return datetime.fromtimestamp(eval(epochSeconds)).strftime('%Y-%m-%d %H:%M:%S')
            else:
                print 'epochSeconds is not a string -', epochSeconds
                return 'non string'
        except Exception as error:
            return 'DB data is bad'
    @classmethod
    def iso86012epochtime(cls, iso8601String):
        """Convert a 19-char 'YYYY-MM-DD HH:MM:SS' string to epoch seconds
        (as a string); other values are returned unchanged."""
        try:
            if isinstance(iso8601String.encode('utf-8'), str) and len(iso8601String) == 19:
                return str(int(time.mktime(time.strptime(iso8601String, '%Y-%m-%d %H:%M:%S'))))
            else:
                return iso8601String
        except Exception as error:
            print 'iso86012epochtime - ', traceback.print_exc()
            return 'error in date parameter'
############################
# Main Program
############################
# Entry point: wire up the DB and command dispatcher, connect to APRS-IS,
# then start the three worker threads (send queue, keyboard poller, reader).
if __name__ == '__main__':
    print ('\nBAKER - APRS-IS Messaging Server')
    print ('BAKER - BARC.APRS.Kelly KE7QHW(SK).Event.Reporting')
    print ('BAKER - 2015 - Brian - KG7AFQ - V0.1')
    print ('BAKER - BSD3 - Open Source Licensed')
    print ('\n(To Quit - type q <cr>)')
    # execution flags
    # module-global verbosity (0-3), cycled at runtime by the 'd' key
    debuglevel = 1
    # Create Ques
    SendQ = {} #Thread to hold and process Baker sent messages until they have been fully processed.
    # Hold Baker DB Connection and Functions
    iBakerDB = clsBakerDB()
    iBakerCmd = clsBakerCommand()
    # Open Connection to APRS Server and DB
    try:
        # NOTE(review): thrdAPRSReadPackets.run() reads this global name.
        APRSConn = clsAPRSConnection()
        if APRSConn.connected:
            # Create threads
            tSndBkrPkts = thrdSendPacketQ('Baker SendQ', 5, SendQ) # Hold and process Baker Packets to be sent
            tReadKeyBoardChars = thrdKeyboardPollerChars('Keyboard Poller', 1) # Watch for keyboard input
            tAPRSReadPackets = thrdAPRSReadPackets('Read APRS Packets', APRSConn) # Read, validate and send Valid Baker Packets to q
            # Start Threads
            tSndBkrPkts.start()
            tReadKeyBoardChars.start()
            tAPRSReadPackets.start()
    except Exception as error:
        print ('\nBaker - Unable to make APRS IS Connection \nBaker - Check the config in the file settings.py \nBaker - Terminating\n\n\n')
        if debuglevel > 1:
            traceback.print_exc()
    finally:
        pass
###########################
### End
###########################
| {
"content_hash": "45e94afefb5fca60cd2f55daf50612b7",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 315,
"avg_line_length": 41.24262295081967,
"alnum_prop": 0.6545830352174259,
"repo_name": "KG7AFQ/baker",
"id": "d3a670ece763e487051f63923448215fbc82f333",
"size": "26701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baker.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33414"
}
],
"symlink_target": ""
} |
__author__ = 'arnaud'
from flask_selfdoc.autodoc import Autodoc # noqa: F401
from flask_selfdoc.autodoc import Selfdoc # noqa: F401
| {
"content_hash": "b740a8f31d289a042c5a183a940d9126",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 55,
"avg_line_length": 33.75,
"alnum_prop": 0.7407407407407407,
"repo_name": "jwg4/flask-autodoc",
"id": "edb516946c66de66125478c27cb1d90b3083e8d4",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_selfdoc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2619"
},
{
"name": "Python",
"bytes": "19300"
}
],
"symlink_target": ""
} |
import re
import sys
import inspect
from mongrel2.config import rc
# Token-producing actions for re.Scanner: each returns a [type, value] pair.
S_IP_ADDRESS = lambda x, token: ['ip_address', token]
S_WORD = lambda x, token: ['word', token]
S_EMAIL_ADDR = lambda x, token: ['email', token]
# Options keep only the text after the last '-' (handles both - and --).
S_OPTION = lambda x, token: ['option', token.split("-")[-1]]
S_INT = lambda x, token: ['int', int(token) ]
S_BOOL = lambda x, token: ['bool', bool(token) ]
S_EMPTY = lambda x, token: ['empty', '']
S_STRING = lambda x, token: ['string', token]
S_TRAILING = lambda x, token: ['trailing', None]


class ArgumentError(Exception):
    """Thrown when args encounters a command line format error."""
    pass


# Patterns are tried in order, so the more specific ones (email, IP
# address, options, bare --) must come before the generic word/string
# catch-alls.
SCANNER = re.Scanner([
    (r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}", S_EMAIL_ADDR),
    (r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]", S_IP_ADDRESS),
    (r"-+[a-zA-Z0-9]+", S_OPTION),
    (r"True", S_BOOL),
    (r"[0-9]+", S_INT),
    (r"--", S_TRAILING),
    (r"[a-z\-]+", S_WORD),
    (r"\s", S_EMPTY),
    (r".+", S_STRING),
])
def match(tokens, of_type = None):
    """
    Pop the next token off *tokens* and return its value, first checking
    (when of_type is given) that it has the expected type.  With
    of_type=None any token is accepted.
    """
    if of_type and not peek(tokens, of_type):
        raise ArgumentError("Expecting '%s' type of argument not %s in tokens: %r. Read the help." %
                            (of_type, tokens[0][0], tokens))
    # Tokens are [type, value] pairs; consume from the front.
    token_type, value = tokens.pop(0)
    return value
def peek(tokens, of_type):
    """Report whether the next token has type *of_type* without consuming it,
    unlike match().  Raises ArgumentError when no tokens remain."""
    if not tokens:
        raise ArgumentError("This command expected more on the command line. Not sure how you did that.")
    return tokens[0][0] == of_type
def trailing_production(data, tokens):
    """Collect every remaining token value under data['TRAILING'] and
    empty *tokens* in place (used for arguments after a bare --)."""
    values = [value for _, value in tokens]
    data['TRAILING'] = values
    del tokens[:]
def option_production(data, tokens):
    """
    Consume one option (or the -- trailing marker) from *tokens* into
    *data*.  A lone option, or one followed by another option or by --,
    is stored as True; otherwise it is paired with the next token's value.
    """
    if peek(tokens, 'trailing'):
        # Everything after -- is collected as trailing arguments.
        match(tokens, 'trailing')
        trailing_production(data, tokens)
        return
    name = match(tokens, 'option')
    # Short-circuit keeps peek() from raising on an empty stream.
    if not tokens or peek(tokens, 'option') or peek(tokens, 'trailing'):
        # Bare flag: nothing usable follows, so it is simply True.
        data[name] = True
    else:
        # The next token holds this option's value.
        data[name] = match(tokens)
def options_production(tokens):
    """Parse all remaining tokens as options and return them as a dict."""
    parsed = {}
    while tokens:
        option_production(parsed, tokens)
    return parsed
def command_production(tokens):
    """Consume and return the leading 'word' token (the command name)."""
    command = match(tokens, 'word')
    return command
def tokenize(argv):
    """
    Tokenize each command line argument with SCANNER.  An argument that
    does not scan into exactly one complete token is kept raw as a
    'string' token.
    """
    result = []
    for argument in argv:
        scanned, remainder = SCANNER.scan(argument)
        if remainder or len(scanned) > 1:
            result.append(['string', argument])
        else:
            result.extend(scanned)
    return result
def parse(argv):
    """
    Tokenize and parse argv as either a command-style or a plain
    options-style argument list.  Returns (command, options); command is
    None when the arguments do not start with a word, so callers can do:

        command, options = args.parse(sys.argv[1:])

    and branch on whether command is None.
    """
    tokens = tokenize(argv)
    if not tokens:
        return None, {}
    if peek(tokens, "word"):
        # Leading word: command-style invocation.
        return command_production(tokens), options_production(tokens)
    # No command given: options only.
    return None, options_production(tokens)
def determine_kwargs(function):
    """
    Inspect *function* and return a dict mapping each keyword argument
    name to its default value.  The result is typically handed to
    ensure_defaults().

    Arguments without defaults are not included.  Fixes two defects in
    the original: it zipped *all* argument names against the defaults
    tuple (defaults belong to the last len(defaults) arguments, so any
    non-default argument misaligned the mapping or raised IndexError),
    and it used inspect.getargspec, which was removed in Python 3.11.
    """
    spec = inspect.getfullargspec(function)
    defaults = spec.defaults or ()
    # Defaults align with the tail of the positional argument list.
    names_with_defaults = spec.args[len(spec.args) - len(defaults):]
    return dict(zip(names_with_defaults, defaults))
def ensure_defaults(options, reqs):
    """
    Reconcile parsed *options* with the command's expected keyword
    arguments *reqs* (name -> default), mutating *options* in place.
    A default of None marks the argument as required: when missing it
    raises ArgumentError (with a dedicated message for TRAILING
    arguments).  Other missing options are filled with their defaults.
    """
    for key in reqs:
        # `is None` instead of the original `== None` (identity check
        # for the None sentinel; avoids __eq__ surprises).
        if reqs[key] is None:
            # Explicitly marked as required.
            if key not in options:
                if key == "TRAILING":
                    raise ArgumentError("Additional arguments required after a -- on the command line.")
                else:
                    raise ArgumentError("Option -%s is required by this command." % key)
        elif key not in options:
            options[key] = reqs[key]
def command_module(mod, command, options, ending="_command"):
    """Look up <command><ending> in *mod*, fill default options, and
    invoke it with *options* as keyword arguments."""
    function = mod.__dict__[command + ending]
    expected = determine_kwargs(function)
    ensure_defaults(options, expected)
    return function(**options)
def available_help(mod, ending="_command"):
    """Return "name:\\ndocstring" help entries for every function in *mod*
    whose name ends with *ending*."""
    return [key.split(ending)[0] + ":\n" + mod.__dict__[key].__doc__
            for key in mod.__dict__ if key.endswith(ending)]
def help_for_command(mod, command, ending="_command"):
    """Docstring for *command* in *mod*, or None when the command does not
    exist (so the caller can print an error)."""
    if command not in available_commands(mod):
        return None
    return mod.__dict__[command + ending].__doc__
def available_commands(mod, ending="_command"):
    """Sorted list of command names: functions in *mod* ending in *ending*,
    with the suffix stripped."""
    return sorted(key.split(ending)[0]
                  for key in mod.__dict__ if key.endswith(ending))
def invalid_command_message(mod):
    """Print an error plus the list of valid commands for *mod*."""
    lines = ["You must specify a valid command. Try these: "]
    lines.extend(available_commands(mod))
    print("\n".join(lines))
def parse_and_run_command(argv, mod, default_command=None):
    """
    One-shot helper: parse *argv* and run the matching *_command function
    from *mod*.  Falls back to *default_command* when no command was
    given; prints an error (and returns None) on parse failures or
    unknown commands.  Returns True on success.
    """
    try:
        command, options = parse(argv)
    except ArgumentError as e:
        print("Parsing Error:", e)
        return
    if not command:
        if not default_command:
            return invalid_command_message(mod)
        command = default_command
    if command not in available_commands(mod):
        return invalid_command_message(mod)
    command_module(mod, command, options)
    return True
| {
"content_hash": "99125e341bfcd2c5d7689cef69d0fb7d",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 106,
"avg_line_length": 32.65182186234818,
"alnum_prop": 0.6250464972101674,
"repo_name": "mongrel2/mongrel2",
"id": "dbb56e316ed5ab22d53994316f5b3549db74bd60",
"size": "8065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/mongrel2/config/args.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "4960"
},
{
"name": "Awk",
"bytes": "868"
},
{
"name": "C",
"bytes": "1517822"
},
{
"name": "C++",
"bytes": "7707"
},
{
"name": "HTML",
"bytes": "21"
},
{
"name": "Makefile",
"bytes": "18458"
},
{
"name": "PHP",
"bytes": "2225"
},
{
"name": "Python",
"bytes": "2821"
},
{
"name": "Ragel",
"bytes": "43448"
},
{
"name": "Roff",
"bytes": "739"
},
{
"name": "Shell",
"bytes": "8597"
},
{
"name": "TSQL",
"bytes": "75502"
},
{
"name": "Yacc",
"bytes": "2870"
}
],
"symlink_target": ""
} |
import time


def getMem(meminfo_path='/proc/meminfo'):
    """
    Print the amount of memory in use, in MiB.

    Fields are looked up by name instead of by line position: the
    original read the first four lines of /proc/meminfo as
    total/free/buffers/cached, which misreads kernels that list
    MemAvailable as the third line.  *meminfo_path* is parameterized
    (default preserves the original behavior) so it can be tested.
    """
    fields = {}
    with open(meminfo_path) as f:
        for line in f:
            name, _, rest = line.partition(':')
            values = rest.split()
            if values:
                # First column after the colon is the size in kB.
                fields[name] = int(values[0])
    mem_use = (fields['MemTotal'] - fields['MemFree']
               - fields['Buffers'] - fields['Cached'])
    print(mem_use // 1024)


if __name__ == '__main__':
    # Poll once a second forever.  The guard keeps imports of this module
    # from starting the loop; the original also used `time` without ever
    # importing it, which raised NameError on the first iteration.
    while True:
        time.sleep(1)
        getMem()
| {
"content_hash": "60b4274e913cb64d55f64917eac0a4db",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 30.636363636363637,
"alnum_prop": 0.5667655786350149,
"repo_name": "edonyM/toolkitem",
"id": "45c73a1a102b7e5bfc97985adae35bdb1480cd96",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rosmonitor/example_monitor2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "919"
},
{
"name": "Python",
"bytes": "301993"
},
{
"name": "Shell",
"bytes": "3676"
}
],
"symlink_target": ""
} |
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import urllib
import urllib2
# gae-init release version; sent to the update-check endpoint below.
__version__ = '5.9.1'


###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    '-d', '--dependencies', dest='install_dependencies', action='store_true',
    help='install virtualenv and python dependencies',
)
PARSER.add_argument(
    '-s', '--start', dest='start', action='store_true',
    help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
    '-o', '--host', dest='host', action='store', default='127.0.0.1',
    help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
    '-p', '--port', dest='port', action='store', default='8080',
    help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
    '--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
    help='all following args are passed to dev_appserver.py',
)
PARSER.add_argument(
    '-v', '--version', dest='show_version', action='store_true',
    help='Show gae-init version',
)
# Parsed once at import time; consulted throughout the module.
ARGS = PARSER.parse_args()


###############################################################################
# Globals
###############################################################################
# File endings never worth shipping/copying.
BAD_ENDINGS = ['pyc', 'pyo', '~']
# Cache for find_gae_path(); filled lazily on first lookup.
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'


###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')

DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB

FILE_REQUIREMENTS = 'requirements.txt'
# Guard file marking the last successful pip run (see make_guard()).
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')

# Virtualenv activation script differs per platform.
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
    if IS_WINDOWS \
    else os.path.join(DIR_VENV, 'bin', 'activate')

DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')


###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
INTERNET_TEST_URL = 'https://www.google.com'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
    """Log one '[HH:MM:SS] script filename' line to stdout.  With no
    filename, print a dashed separator and right-pad the script label
    with dashes instead."""
    timestamp = datetime.now().strftime('%H:%M:%S')
    if not filename:
        filename = '-' * 46
        script = script.rjust(12, '-')
    print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
    """Create *directory* (with parents) if it does not already exist."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
def listdir(directory, split_ext=False):
    """List entries of *directory*, optionally with extensions stripped.
    Returns [] when the directory cannot be read."""
    try:
        entries = os.listdir(directory)
    except OSError:
        return []
    if split_ext:
        return [os.path.splitext(entry)[0] for entry in entries]
    return entries
def site_packages_path():
    """Path to the virtualenv's site-packages (layout is platform-specific)."""
    if IS_WINDOWS:
        return os.path.join(DIR_VENV, 'Lib', 'site-packages')
    python_dir = 'python%s.%s' % sys.version_info[:2]
    return os.path.join(DIR_VENV, 'lib', python_dir, 'site-packages')
def create_virtualenv():
    """Create the virtualenv once and wire it up for App Engine work:
    clear PYTHONPATH on activation and drop a gae.pth pointing at the
    SDK, the libx dir, and dev_appserver's sys.path fixup."""
    if not os.path.exists(FILE_VENV):
        os.system('virtualenv --no-site-packages %s' % DIR_VENV)
        # Activation must not inherit the caller's PYTHONPATH.
        os.system('echo %s >> %s' % (
            'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
        ))
        pth_file = os.path.join(site_packages_path(), 'gae.pth')
        echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
        os.system(echo_to % find_gae_path())
        os.system(echo_to % os.path.abspath(DIR_LIBX))
        # .pth lines starting with 'import' are executed at interpreter
        # startup, so this makes dev_appserver patch sys.path each run.
        fix_path_cmd = 'import dev_appserver; dev_appserver.fix_sys_path()'
        os.system(echo_to % (
            fix_path_cmd if IS_WINDOWS else '"%s"' % fix_path_cmd
        ))
    return True
def exec_pip_commands(command):
    """Run a pip *command* inside the virtualenv (creating it on demand).
    All steps are chained into a single shell invocation so the
    activation applies to the pip run."""
    script = []
    if create_virtualenv():
        activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
        activate_cmd %= FILE_VENV
        script.append(activate_cmd)

    script.append('echo %s' % command)
    # Skip googleapiclient's runtime compatibility check during install.
    script.append('%s SKIP_GOOGLEAPICLIENT_COMPAT_CHECK=1' %
                  ('set' if IS_WINDOWS else 'export'))
    script.append(command)
    # cmd.exe chains with '&'; elsewhere run the ';'-joined script in bash.
    script = '&'.join(script) if IS_WINDOWS else \
        '/bin/bash -c "%s"' % ';'.join(script)
    os.system(script)
def make_guard(fname, cmd, spec):
    """Write a guard file recording that *cmd* need not run again until
    *spec* changes (compared via mtime in guard_is_newer())."""
    message = 'Prevents %s execution if newer than %s' % (cmd, spec)
    with open(fname, 'w') as guard:
        guard.write(message)
def guard_is_newer(guard, watched):
    """True when *guard* exists and was modified more recently than
    *watched*; False otherwise (including when *guard* is missing)."""
    if not os.path.exists(guard):
        return False
    return os.path.getmtime(guard) > os.path.getmtime(watched)
def check_if_pip_should_run():
    """True when requirements.txt changed since the pip guard was written."""
    return not guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
def install_py_libs():
    """Install requirements into the virtualenv, then vendor the resulting
    site-packages into DIR_LIB, skipping tooling and binary artifacts."""
    # Skip entirely when requirements.txt is unchanged and lib/ exists.
    if not check_if_pip_should_run() and os.path.exists(DIR_LIB):
        return

    exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)

    # Artifacts that must not be vendored into the app's lib directory.
    exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info', '.so']
    exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
    exclude = [
        'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
        'easy_install.py', 'pkg_resources', 'pkg_resources.py'
    ]

    def _exclude_prefix(pkg):
        # True if pkg starts with any excluded distribution prefix.
        for prefix in exclude_prefix:
            if pkg.startswith(prefix):
                return True
        return False

    def _exclude_ext(pkg):
        # True if pkg ends with any excluded extension.
        for ext in exclude_ext:
            if pkg.endswith(ext):
                return True
        return False

    def _get_dest(pkg):
        # Destination inside DIR_LIB; creates DIR_LIB on first use.
        make_dirs(DIR_LIB)
        return os.path.join(DIR_LIB, pkg)

    site_packages = site_packages_path()
    dir_libs = listdir(DIR_LIB)
    dir_libs.extend(listdir(DIR_LIBX))
    for dir_ in listdir(site_packages):
        # Skip packages already vendored or explicitly excluded.
        if dir_ in dir_libs or dir_ in exclude:
            continue
        if _exclude_prefix(dir_) or _exclude_ext(dir_):
            continue
        src_path = os.path.join(site_packages, dir_)
        # Files and directories need different copy primitives.
        copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
        copy(src_path, _get_dest(dir_))

    # Record success so later runs skip until requirements.txt changes.
    make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
def install_dependencies():
    """Ensure the temp dir exists, then install/vendor Python dependencies."""
    make_dirs(DIR_TEMP)
    install_py_libs()
def check_for_update():
    """POST the local version to the gae-init site at most once per day
    and cache the JSON response in FILE_UPDATE (best effort only)."""
    if os.path.exists(FILE_UPDATE):
        mtime = os.path.getmtime(FILE_UPDATE)
        last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
        today = datetime.utcnow().strftime('%Y-%m-%d')
        # Already checked today: skip the network round trip.
        if last == today:
            return

    try:
        # Touch the cache first so a failed request still counts as
        # today's attempt and we don't retry on every invocation.
        with open(FILE_UPDATE, 'a'):
            os.utime(FILE_UPDATE, None)
        request = urllib2.Request(
            CORE_VERSION_URL,
            urllib.urlencode({'version': __version__}),
        )
        response = urllib2.urlopen(request)
        with open(FILE_UPDATE, 'w') as update_json:
            update_json.write(response.read())
    except (urllib2.HTTPError, urllib2.URLError):
        # Offline or server error: silently ignore, try again tomorrow.
        pass
def print_out_update(force_show=False):
    """Print an update notice when the cached version info in FILE_UPDATE
    is newer than __version__ (or unconditionally with force_show)."""
    try:
        import pip
        # Older pip releases exposed SemanticVersion here ...
        SemVer = pip.util.version.SemanticVersion
    except AttributeError:
        # ... newer pip vendors it under distlib.
        # NOTE(review): a missing pip would raise ImportError, which is
        # not caught here — presumably pip is guaranteed by doctor checks.
        import pip._vendor.distlib.version
        SemVer = pip._vendor.distlib.version.SemanticVersion

    try:
        with open(FILE_UPDATE, 'r') as update_json:
            data = json.load(update_json)
            if SemVer(__version__) < SemVer(data['version']) or force_show:
                print_out('UPDATE')
                print_out(data['version'], 'Latest version of gae-init')
                print_out(__version__, 'Your version is a bit behind')
                print_out('CHANGESET', data['changeset'])
    except (ValueError, KeyError):
        # Corrupt or incomplete cache: discard so the next run refetches.
        os.remove(FILE_UPDATE)
    except IOError:
        pass
###############################################################################
# Doctor
###############################################################################
def internet_on():
    """Return True when INTERNET_TEST_URL is reachable within 2 seconds."""
    try:
        urllib2.urlopen(INTERNET_TEST_URL, timeout=2)
    except (urllib2.URLError, socket.timeout):
        return False
    return True
def check_requirement(check_func):
    """Run one check_* function (returning (ok, name, help anchor)) and
    print a docs pointer when it fails.  Returns the boolean result."""
    result, name, help_url_id = check_func()
    if not result:
        print_out('NOT FOUND', name)
        if help_url_id:
            print 'Please see %s%s' % (REQUIREMENTS_URL, help_url_id)
        return False
    return True
def find_gae_path():
    """Locate the App Engine SDK directory, caching the result in the
    GAE_PATH global.  Returns '' when dev_appserver.py is not on PATH."""
    global GAE_PATH
    if GAE_PATH:
        return GAE_PATH
    if IS_WINDOWS:
        gae_path = None
        # Scan PATH by hand: find_executable misses .py files on Windows.
        for path in os.environ['PATH'].split(os.pathsep):
            if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
                gae_path = path
    else:
        gae_path = spawn.find_executable('dev_appserver.py')
        if gae_path:
            gae_path = os.path.dirname(os.path.realpath(gae_path))
    if not gae_path:
        return ''
    gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
    if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
        # Standalone SDK: dev_appserver.py lives in the SDK root itself.
        GAE_PATH = gae_path
    else:
        # Cloud SDK install: the GAE SDK sits under platform/google_appengine.
        gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
        if os.path.exists(gae_path):
            GAE_PATH = os.path.realpath(gae_path)
    return GAE_PATH
def check_internet():
    # Each check_* returns (ok, display name, docs anchor) for
    # check_requirement(); an empty anchor suppresses the docs pointer.
    return internet_on(), 'Internet', ''


def check_gae():
    return bool(find_gae_path()), 'Google App Engine SDK', '#gae'


def check_git():
    return bool(spawn.find_executable('git')), 'Git', '#git'


def check_nodejs():
    return bool(spawn.find_executable('node')), 'Node.js', '#nodejs'


def check_pip():
    return bool(spawn.find_executable('pip')), 'pip', '#pip'


def check_virtualenv():
    return bool(spawn.find_executable('virtualenv')), 'virtualenv', '#virtualenv'
def doctor_says_ok():
    """Run every hard requirement check (exiting with status 1 if any
    fails), then return the result of the soft internet check."""
    checkers = [check_gae, check_git, check_nodejs, check_pip, check_virtualenv]
    results = [check_requirement(checker) for checker in checkers]
    if not all(results):
        sys.exit(1)
    return check_requirement(check_internet)
###############################################################################
# Main
###############################################################################
def run_start():
    """Launch dev_appserver.py on DIR_MAIN with storage under temp/,
    forwarding any extra ARGS.args verbatim."""
    make_dirs(DIR_STORAGE)
    port = int(ARGS.port)
    arguments = [
        'dev_appserver.py',
        DIR_MAIN,
        '--host %s' % ARGS.host,
        '--port %s' % port,
        '--admin_port %s' % (port + 1),
        '--storage_path=%s' % DIR_STORAGE,
        '--skip_sdk_update_check',
    ]
    arguments.extend(ARGS.args)
    os.system(' '.join(map(str, arguments)))
def run():
    """Entry point: validate the environment, then dispatch on CLI flags."""
    # No args at all, or appserver args given without --start: show usage.
    if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
        PARSER.print_help()
        sys.exit(1)

    # Operate relative to this script's directory regardless of CWD.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if doctor_says_ok():
        install_dependencies()
        check_for_update()
    if ARGS.show_version:
        print_out_update(force_show=True)
    else:
        print_out_update()
    if ARGS.start:
        run_start()
    if ARGS.install_dependencies:
        install_dependencies()


# Script entry point.
if __name__ == '__main__':
    run()
| {
"content_hash": "8883ba0d1bd39ac3a596a04496a3ddc7",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 79,
"avg_line_length": 28.361702127659573,
"alnum_prop": 0.5837396849212303,
"repo_name": "lipis/gae-init",
"id": "50694c82f3021240d176a07fdbe8e5fef7325fc6",
"size": "10703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5385"
},
{
"name": "CoffeeScript",
"bytes": "16640"
},
{
"name": "HTML",
"bytes": "68867"
},
{
"name": "JavaScript",
"bytes": "65"
},
{
"name": "Python",
"bytes": "120538"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
} |
"""
Start up a Simple topology
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.log import setLogLevel, info, error, warn, debug
from mininet.cli import CLI
from mininet.topo import Topo
from mininet.util import quietRun
from mininet.moduledeps import pathCheck
from mininet.link import Link, TCLink
from sys import exit
import os.path
from subprocess import Popen, STDOUT, PIPE
import sys
#import argparse
class MyController( Controller ):
    """Mininet Controller stub for a remote controller whose lifecycle is
    managed externally: start()/stop() are no-ops and checkListening()
    only warns when the controller cannot be reached."""

    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
        """Init.
           name: name to give controller
           ip: the IP address where the remote controller is
           listening
           port: the port where the remote controller is listening"""
        Controller.__init__( self, name, ip=ip, port=port, **kwargs )

    def start( self ):
        "Overridden to do nothing."
        return

    def stop( self ):
        "Overridden to do nothing."
        return

    def checkListening( self ):
        "Warn if remote controller is not accessible"
        # Probe with telnet; 'Unable' in the output means the connect failed.
        listening = self.cmd( "echo A | telnet -e A %s %d" %
                              ( self.ip, self.port ) )
        if 'Unable' in listening:
            warn( "Unable to contact the remote controller"
                  " at %s:%d\n" % ( self.ip, self.port ) )
class SDNTopo( Topo ):
    """SDN Topology: six switches (dpids 0x101..0x106) with one host each,
    interconnected per the fixed wiring table below."""

    # Inter-switch wiring as (switch number, switch number) pairs,
    # in the original link-creation order.
    _SW_LINKS = [(1, 2), (1, 6), (2, 3), (3, 4), (3, 6), (4, 5), (5, 6), (4, 6)]

    def __init__( self, *args, **kwargs ):
        Topo.__init__( self, *args, **kwargs )

        switches = [ self.addSwitch( 'sw%d' % i, dpid='%016x' % (0x100 + i) )
                     for i in range( 1, 7 ) ]
        hosts = [ self.addHost( 'host%d' % i ) for i in range( 1, 7 ) ]

        # Attach each host to its like-numbered switch.
        for host, switch in zip( hosts, switches ):
            self.addLink( host, switch )

        for a, b in self._SW_LINKS:
            self.addLink( switches[a - 1], switches[b - 1] )
def startsshd( host ):
    "Start sshd on host"
    info( '*** Starting sshd\n' )
    name, intf, ip = host.name, host.defaultIntf(), host.IP()
    # Per-host banner file tags this sshd instance so stopsshd() can
    # kill exactly these processes by matching on 'Banner'.
    banner = '/tmp/%s.banner' % name
    host.cmd( 'echo "Welcome to %s at %s" > %s' % ( name, ip, banner ) )
    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
def startsshds ( hosts ):
    "Start an sshd instance on every host in hosts"
    for h in hosts:
        startsshd( h )
def stopsshd( ):
    "Stop *all* sshd processes with a custom banner"
    # Matching on 'Banner' kills only instances started via startsshd().
    info( '*** Shutting down stale sshd/Banner processes ',
          quietRun( "pkill -9 -f Banner" ), '\n' )
def sdnnet(opt):
    """Build and start the topology, attach external taps, assign static
    host addresses, restart sshd on all hosts, then either enter the
    Mininet CLI (opt == "cli") or tear down immediately."""
    # os.system('/home/ubuntu/openflow/controller/controller ptcp: &')
    # os.system('/home/ubuntu/openflow/controller/controller ptcp:7000 &')

    topo = SDNTopo()
    info( '*** Creating network\n' )
    # net = Mininet( topo=topo, controller=RemoteController )
    net = Mininet( topo=topo, controller=MyController, link=TCLink)
    # dc = DebugController('c3', ip='127.0.0.1', port=7000)
    # net.addController(dc)
    # net.addController(controller=RemoteController)
    host1, host2, host3, host4, host5, host6 = net.get( 'host1', 'host2', 'host3', 'host4', 'host5', 'host6')

    ## Adding 2nd, 3rd and 4th interface to host1 connected to sw1 (for another BGP peering)
    sw1 = net.get('sw1')
    sw2 = net.get('sw2')
    sw3 = net.get('sw3')
    sw4 = net.get('sw4')
    sw5 = net.get('sw5')
    sw6 = net.get('sw6')

    net.start()

    # Attach tap interfaces to the switches.
    # NOTE(review): assumes the tap01_* interfaces already exist on the
    # system — confirm against the deployment scripts.
    sw2.attach('tap01_2')
    sw3.attach('tap01_3')
    sw4.attach('tap01_4')
    sw4.attach('tap01_5')
    sw5.attach('tap01_6')
    sw6.attach('tap01_7')
    sw1.attach('tap01_8')

    # Static addressing for all hosts on a shared /16.
    host1.defaultIntf().setIP('192.168.100.141/16')
    host2.defaultIntf().setIP('192.168.100.142/16')
    host3.defaultIntf().setIP('192.168.100.143/16')
    host4.defaultIntf().setIP('192.168.100.144/16')
    host5.defaultIntf().setIP('192.168.100.145/16')
    host6.defaultIntf().setIP('192.168.100.146/16')

    hosts = [ host1, host2, host3, host4, host5, host6 ]
    # Kill any stale sshd instances before starting fresh ones.
    stopsshd ()
    startsshds ( hosts )

    if opt=="cli":
        CLI(net)

    stopsshd()
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    # No args: interactive CLI; -n: headless; anything else: print usage.
    if len(sys.argv) == 1:
        sdnnet("cli")
    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
        sdnnet("nocli")
    else:
        print "%s [-n]" % sys.argv[0]
| {
"content_hash": "d198fa91d8331ccdb087b49922c9e861",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 109,
"avg_line_length": 32.12258064516129,
"alnum_prop": 0.5955011046394858,
"repo_name": "opennetworkinglab/spring-open",
"id": "a0e184e7687f70e27977873d8939236f6c884b29",
"size": "4998",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "old-scripts/test-network/mininet/dev_network_core_1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "34315"
},
{
"name": "CSS",
"bytes": "10104"
},
{
"name": "Java",
"bytes": "3755244"
},
{
"name": "JavaScript",
"bytes": "110851"
},
{
"name": "Python",
"bytes": "255081"
},
{
"name": "Ruby",
"bytes": "6178"
},
{
"name": "Shell",
"bytes": "114478"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import random
# Output separator styles selectable with -s/--separator.
SEPARATORS = {
    'colon': ':',
    'dash': '-',
    'none': '',
    'space': ' ',
}
def parse_args():
    """Build the command line parser and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Generate random MAC address')
    separator_choices = sorted(SEPARATORS)
    parser.add_argument(
        '-s', '--separator', choices=separator_choices, default='colon',
        help='What separator to use between the bytes in the output.')
    return parser.parse_args()
def main():
    """Generate one random MAC address and print it with the chosen separator."""
    args = parse_args()
    separator = SEPARATORS[args.separator]
    octets = [random.randint(0, 255) for _ in range(0, 6)]
    # First octet: clear the multicast bit (0x01) and set the locally
    # administered bit (0x02).
    octets[0] = (octets[0] & 0xfc) | 0x02
    print(separator.join('{0:02x}'.format(octet) for octet in octets))


if __name__ == '__main__':
    main()
| {
"content_hash": "633d971f2e44554a4103f3b843090968",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 27.06896551724138,
"alnum_prop": 0.575796178343949,
"repo_name": "olavmrk/python-macgen",
"id": "a17e1fcb820c54b1ba276e278912a7ca66178d88",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "macgen.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "807"
}
],
"symlink_target": ""
} |
__all__ = ['statement_to_annotations']
from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
# Namespaces searched for agent groundings, in priority order: INDRA's
# default order extended with extra term/chemical namespaces.
grounding_ns = default_ns_order + \
    ['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
    """Turn one Statement into a list of annotation dicts, one per usable
    evidence, each carrying the shared English annotation text."""
    text = get_annotation_text(stmt, annotate_agents=annotate_agents)
    results = []
    for evidence in stmt.evidence:
        annotation = evidence_to_annotation(evidence)
        if annotation is not None:
            annotation['annotation'] = text
            results.append(annotation)
    return results
def evidence_to_annotation(evidence):
    """Build an annotation dict (url, target_text, tags) for one Evidence,
    or None when it has no text or no usable reference URL.  URL priority:
    PMCID, then PMID, then an explicit URL text ref."""
    if not evidence.text:
        return None
    pmcid = evidence.text_refs.get('PMCID')
    if pmcid:
        url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % pmcid
    elif evidence.pmid:
        url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
    elif evidence.text_refs.get('URL'):
        url = evidence.text_refs['URL']
    else:
        return None
    return {
        'url': url,
        'target_text': evidence.text,
        'tags': [evidence.source_api]
    }
def get_annotation_text(stmt, annotate_agents=True):
    """Assemble an English sentence for *stmt*; with annotate_agents, turn
    each grounded agent mention into a markdown identifiers.org link."""
    ea = EnglishAssembler(stmts=[stmt])
    annotation_text = ea.make_model()
    if annotate_agents:
        # Each inserted link lengthens the string, so track (position,
        # added length) pairs and shift later agents' coordinates.
        inserts = []
        for agent_wc in ea.stmt_agents[0]:
            for insert_begin, insert_len in inserts:
                # NOTE(review): shifts when an earlier insert began before
                # this agent's start — assumes agents are processed in an
                # order where this comparison is sufficient; confirm.
                if insert_begin < agent_wc.coords[0]:
                    agent_wc.update_coords(insert_len)
            db_ns, db_id = get_grounding(agent_wc.db_refs,
                                         grounding_ns)
            if not db_ns:
                # Ungrounded agent: leave the plain text untouched.
                continue
            identifiers_url = \
                identifiers.get_identifiers_url(db_ns, db_id)
            # Markdown link replaces the agent's plain-text span.
            grounding_text = '[%s](%s)' % (agent_wc.name, identifiers_url)
            insert_len = len(grounding_text) - agent_wc.coords[1] + \
                agent_wc.coords[0]
            inserts.append((agent_wc.coords[0], insert_len))
            before_part = annotation_text[:agent_wc.coords[0]]
            after_part = annotation_text[agent_wc.coords[1]:]
            annotation_text = ''.join([before_part, grounding_text,
                                       after_part])
    return annotation_text
| {
"content_hash": "35a314c66b03a0ac66c7d8dc7db7ddf3",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 35.35294117647059,
"alnum_prop": 0.5782029950083195,
"repo_name": "bgyori/indra",
"id": "7d369c63e93e71317ef6d0cc36ce93bdf6094f61",
"size": "2404",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "indra/sources/hypothesis/annotator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "Dockerfile",
"bytes": "1710"
},
{
"name": "HTML",
"bytes": "28917"
},
{
"name": "JavaScript",
"bytes": "13276"
},
{
"name": "Python",
"bytes": "3519860"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score, precision_recall_curve
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR
from sklearn.utils import shuffle
from sklearn.metrics import PrecisionRecallDisplay, plot_precision_recall_curve
# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
pytestmark = pytest.mark.filterwarnings(
"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
"matplotlib.*"
)
def test_precision_recall_display_validation(pyplot):
    """Check that we raise the proper error when validating parameters."""
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=5, random_state=0
    )

    # Unfitted estimators are rejected up front.
    with pytest.raises(NotFittedError):
        PrecisionRecallDisplay.from_estimator(SVC(), X, y)

    regressor = SVR().fit(X, y)
    y_pred_regressor = regressor.predict(X)
    classifier = SVC(probability=True).fit(X, y)
    y_pred_classifier = classifier.predict_proba(X)[:, -1]

    err_msg = "PrecisionRecallDisplay.from_estimator only supports classifiers"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_estimator(regressor, X, y)

    # A fitted multiclass classifier is rejected as well: binary only.
    err_msg = "Expected 'estimator' to be a binary classifier, but got SVC"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_estimator(classifier, X, y)

    err_msg = "{} format is not supported"
    with pytest.raises(ValueError, match=err_msg.format("continuous")):
        # Force `y_true` to be seen as a regression problem
        PrecisionRecallDisplay.from_predictions(y + 0.5, y_pred_classifier, pos_label=1)
    with pytest.raises(ValueError, match=err_msg.format("multiclass")):
        PrecisionRecallDisplay.from_predictions(y, y_pred_regressor, pos_label=1)

    err_msg = "Found input variables with inconsistent numbers of samples"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_predictions(y, y_pred_classifier[::2])

    # Labels {10, 11} are ambiguous without an explicit pos_label.
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    y += 10
    classifier.fit(X, y)
    y_pred_classifier = classifier.predict_proba(X)[:, -1]
    err_msg = r"y_true takes value in {10, 11} and pos_label is not specified"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_predictions(y, y_pred_classifier)
# FIXME: Remove in 1.2
def test_plot_precision_recall_curve_deprecation(pyplot):
    """Check that we raise a FutureWarning when calling
    `plot_precision_recall_curve`."""
    X, y = make_classification(random_state=0)
    clf = LogisticRegression().fit(X, y)

    deprecation_warning = "Function plot_precision_recall_curve is deprecated"
    with pytest.warns(FutureWarning, match=deprecation_warning):
        plot_precision_recall_curve(clf, X, y)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_precision_recall_display_plotting(pyplot, constructor_name, response_method):
    """Check the overall plotting rendering."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    pos_label = 1

    classifier = LogisticRegression().fit(X, y)
    classifier.fit(X, y)

    y_pred = getattr(classifier, response_method)(X)
    # predict_proba returns 2D; keep only the positive-class column.
    y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, pos_label]

    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")

    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier, X, y, response_method=response_method
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y, y_pred, pos_label=pos_label
        )

    # Display attributes must match a direct metric computation.
    precision, recall, _ = precision_recall_curve(y, y_pred, pos_label=pos_label)
    average_precision = average_precision_score(y, y_pred, pos_label=pos_label)

    np.testing.assert_allclose(display.precision, precision)
    np.testing.assert_allclose(display.recall, recall)
    assert display.average_precision == pytest.approx(average_precision)

    import matplotlib as mpl

    assert isinstance(display.line_, mpl.lines.Line2D)
    assert isinstance(display.ax_, mpl.axes.Axes)
    assert isinstance(display.figure_, mpl.figure.Figure)
    assert display.ax_.get_xlabel() == "Recall (Positive label: 1)"
    assert display.ax_.get_ylabel() == "Precision (Positive label: 1)"

    # plotting passing some new parameters
    display.plot(alpha=0.8, name="MySpecialEstimator")
    expected_label = f"MySpecialEstimator (AP = {average_precision:0.2f})"
    assert display.line_.get_label() == expected_label
    assert display.line_.get_alpha() == pytest.approx(0.8)
@pytest.mark.parametrize(
    "constructor_name, default_label",
    [
        ("from_estimator", "LogisticRegression (AP = {:.2f})"),
        ("from_predictions", "Classifier (AP = {:.2f})"),
    ],
)
def test_precision_recall_display_name(pyplot, constructor_name, default_label):
    """Check the behaviour of the name parameters"""
    X, y = make_classification(n_classes=2, n_samples=100, random_state=0)
    pos_label = 1

    # BUG FIX: drop the redundant second ``classifier.fit(X, y)`` call that
    # followed the already-fitted constructor chain.
    classifier = LogisticRegression().fit(X, y)

    y_pred = classifier.predict_proba(X)[:, pos_label]

    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(classifier, X, y)
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y, y_pred, pos_label=pos_label
        )

    average_precision = average_precision_score(y, y_pred, pos_label=pos_label)

    # check that the default name is used
    assert display.line_.get_label() == default_label.format(average_precision)

    # check that the name can be set
    display.plot(name="MySpecialEstimator")
    assert (
        display.line_.get_label()
        == f"MySpecialEstimator (AP = {average_precision:.2f})"
    )
@pytest.mark.parametrize(
    "clf",
    [
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
        ),
    ],
)
def test_precision_recall_display_pipeline(pyplot, clf):
    """Unfitted pipelines must raise; fitted ones display under their class name."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)

    # the estimator has not been fitted yet: ``from_estimator`` must refuse it
    with pytest.raises(NotFittedError):
        PrecisionRecallDisplay.from_estimator(clf, X, y)

    clf.fit(X, y)
    display = PrecisionRecallDisplay.from_estimator(clf, X, y)
    assert display.estimator_name == type(clf).__name__
def test_precision_recall_display_string_labels(pyplot):
    """Non-regression test for string class labels (gh-15738)."""
    cancer = load_breast_cancer()
    X, y = cancer.data, cancer.target_names[cancer.target]

    lr = make_pipeline(StandardScaler(), LogisticRegression())
    lr.fit(X, y)
    assert all(name in lr.classes_ for name in cancer.target_names)

    display = PrecisionRecallDisplay.from_estimator(lr, X, y)
    y_pred = lr.predict_proba(X)[:, 1]
    avg_prec = average_precision_score(y, y_pred, pos_label=lr.classes_[1])
    assert display.average_precision == pytest.approx(avg_prec)
    assert display.estimator_name == lr.__class__.__name__

    # without an explicit pos_label, string targets must be rejected
    err_msg = r"y_true takes value in {'benign', 'malignant'}"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_predictions(y, y_pred)

    display = PrecisionRecallDisplay.from_predictions(
        y, y_pred, pos_label=lr.classes_[1]
    )
    assert display.average_precision == pytest.approx(avg_prec)
@pytest.mark.parametrize(
    "average_precision, estimator_name, expected_label",
    [
        (0.9, None, "AP = 0.90"),
        (None, "my_est", "my_est"),
        (0.8, "my_est2", "my_est2 (AP = 0.80)"),
    ],
)
def test_default_labels(pyplot, average_precision, estimator_name, expected_label):
    """Check the default labels used in the display."""
    display = PrecisionRecallDisplay(
        precision=np.array([1, 0.5, 0]),
        recall=np.array([0, 0.5, 1]),
        average_precision=average_precision,
        estimator_name=estimator_name,
    )
    display.plot()
    assert display.line_.get_label() == expected_label
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_plot_precision_recall_pos_label(pyplot, constructor_name, response_method):
    """Check that an explicit `pos_label` yields the statistics of that class.

    The dataset is made highly imbalanced so that the curves of the two
    classes are clearly distinguishable.
    """
    # check that we can provide the positive label and display the proper
    # statistics
    X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced version of the breast cancer dataset
    idx_positive = np.flatnonzero(y == 1)
    idx_negative = np.flatnonzero(y == 0)
    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
    X, y = X[idx_selected], y[idx_selected]
    X, y = shuffle(X, y, random_state=42)
    # only use 2 features to make the problem even harder
    X = X[:, :2]
    # map the integer targets to strings so pos_label handling is exercised
    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        stratify=y,
        random_state=0,
    )
    classifier = LogisticRegression()
    classifier.fit(X_train, y_train)
    # sanity check to be sure the positive class is classes_[0] and that we
    # are betrayed by the class imbalance
    assert classifier.classes_.tolist() == ["cancer", "not cancer"]
    y_pred = getattr(classifier, response_method)(X_test)
    # we select the corresponding probability columns, or reverse the decision
    # function otherwise (the decision function is oriented towards classes_[1])
    y_pred_cancer = -1 * y_pred if y_pred.ndim == 1 else y_pred[:, 0]
    y_pred_not_cancer = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            pos_label="cancer",
            response_method=response_method,
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y_test,
            y_pred_cancer,
            pos_label="cancer",
        )
    # we should obtain the statistics of the "cancer" class
    avg_prec_limit = 0.65
    assert display.average_precision < avg_prec_limit
    # area under the curve (negated because recall is decreasing) agrees
    assert -np.trapz(display.precision, display.recall) < avg_prec_limit
    # otherwise we should obtain the statistics of the "not cancer" class
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            response_method=response_method,
            pos_label="not cancer",
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y_test,
            y_pred_not_cancer,
            pos_label="not cancer",
        )
    avg_prec_limit = 0.95
    assert display.average_precision > avg_prec_limit
    assert -np.trapz(display.precision, display.recall) > avg_prec_limit
| {
"content_hash": "1fce328e56a2fea98ad1b66904ee5b99",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 88,
"avg_line_length": 38.1546052631579,
"alnum_prop": 0.676868695577205,
"repo_name": "sergeyf/scikit-learn",
"id": "165e2b75df36edddd7822c11a8ae95ca45b6d8e1",
"size": "11599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/metrics/_plot/tests/test_precision_recall_display.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "718114"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9906683"
},
{
"name": "Shell",
"bytes": "49565"
}
],
"symlink_target": ""
} |
from setuptools import setup
from odk_tools import __version__
# Package metadata for odk_tools; install with ``pip install .``.
setup(
    name="odk_tools",
    # single-sourced from odk_tools.__version__ (imported above)
    version=__version__,
    description="Tools for working with ODK XLSForms.",
    url="https://github.com/lindsay-stevens/",
    author="Lindsay Stevens",
    author_email="lindsay.stevens.au@gmail.com",
    packages=['odk_tools'],
    # run with ``python setup.py test``
    test_suite='tests',
    include_package_data=True,
    license="MIT",
    install_requires=[
        # see requirements.txt
    ],
    keywords="odk",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
    ],
)
| {
"content_hash": "5533a6c7962814e61bd63597cd39b7bd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 55,
"avg_line_length": 28.28,
"alnum_prop": 0.6223479490806223,
"repo_name": "lindsay-stevens/odk_tools",
"id": "09167eefa2708cdcac2400477eceb256a50fc62a",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147895"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from fuzzy_modeling.models.utils import PyFuzzyMixin
from fuzzy.System import System
User = get_user_model()
class SystemModel(models.Model, PyFuzzyMixin):
    """
    A Fuzzy system model.

    Persists a pyfuzzy ``System`` in the database; ``get_pyfuzzy`` and
    ``from_pyfuzzy`` convert between this ORM representation and the
    in-memory ``fuzzy.System.System`` object.
    """
    class Meta:
        app_label = 'fuzzy_modeling'

    # human-readable identifier (not used by get_pyfuzzy/from_pyfuzzy below;
    # NOTE(review): from_pyfuzzy never fills it in — confirm that is intended)
    name = models.CharField(_("Name"), blank=False, null=False, max_length=250)
    description = models.TextField(_("Description"))
    # owning user; nullable so systems can exist without an owner
    user = models.ForeignKey(User,
                             blank=True,
                             null=True,
                             related_name='systems'
                             )

    def __unicode__(self):
        # Python 2 string hook; displays the description rather than `name`
        return self.description

    def get_pyfuzzy(self):
        """
        Return the Pyfuzzy class of this model
        """
        system = System(description=self.description)
        # register input variables keyed by name
        for ivar in self.inputvariablemodel_set.all():
            fuzzy_var = ivar.get_pyfuzzy()
            system.variables[ivar.name] = fuzzy_var
        # register output variables keyed by name
        for ovar in self.outputvariablemodel_set.all():
            fuzzy_var = ovar.get_pyfuzzy()
            system.variables[ovar.name] = fuzzy_var
        # rules receive the system so they can resolve the variables above
        for rule in self.rulemodel_set.all():
            fuzzy_rule = rule.get_pyfuzzy(system)
            system.rules[rule.name] = fuzzy_rule
        return system

    @classmethod
    def from_pyfuzzy(cls, pyfuzzy):
        """
        Return the model representation of an instance of the pyfuzzy attr
        """
        system_model = cls()
        system_model.description = pyfuzzy.description
        # first save: the row needs a primary key before related objects
        # can be attached via the *_set managers below
        system_model.save()

        # variables
        for v_name, pyfuzzy_var in pyfuzzy.variables.items():
            # set the name of this var inside the pyfuzzy_var
            pyfuzzy_var.name = v_name
            # is an output variable
            if pyfuzzy_var.__class__.__name__ == 'OutputVariable':
                OutputvarModel = system_model.outputvariablemodel_set.model
                outputvar_model = OutputvarModel.from_pyfuzzy(pyfuzzy_var)
                system_model.outputvariablemodel_set.add(outputvar_model)
            # is an input variable
            else:
                InputvarModel = system_model.inputvariablemodel_set.model
                inputvar_model = InputvarModel.from_pyfuzzy(pyfuzzy_var)
                system_model.inputvariablemodel_set.add(inputvar_model)

        # rules
        for r_name, rule in pyfuzzy.rules.items():
            # set the name of this var to be used later
            rule.name = r_name
            rule_model = system_model.rulemodel_set.model.from_pyfuzzy(rule, pyfuzzy, system_model)
            system_model.rulemodel_set.add(rule_model)

        system_model.save()
        return system_model
| {
"content_hash": "2411c479a3c1d3e5b628c0391450d630",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 99,
"avg_line_length": 32.28089887640449,
"alnum_prop": 0.6038983640793596,
"repo_name": "arruda/cloudfuzzy",
"id": "59ddd8d663275b5c59c86c89d8f1f67791c30646",
"size": "2898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzy_modeling/models/systems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189161"
},
{
"name": "CoffeeScript",
"bytes": "4335"
},
{
"name": "JavaScript",
"bytes": "50500"
},
{
"name": "Python",
"bytes": "166778"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "6348"
}
],
"symlink_target": ""
} |
import unittest
import mxnet as mx
import numpy as np
from converter._mxnet_converter import convert
from collections import namedtuple
from converter import utils
def _mxnet_remove_batch(input_data):
for blob in input_data:
input_data[blob] = np.reshape(input_data[blob], input_data[blob].shape[1:])
return input_data
def _get_mxnet_module(net, data_shapes, mode, label_names, input_names=None):
    """ Given a symbolic graph, input shape and the initialization mode,
    returns an MXNet module.

    Parameters
    ----------
    net: MXNet Symbol Graph
        The symbolic graph to wrap in a module.
    data_shapes: list of (name, shape) tuples
        Shapes of the module inputs.
    mode: (random|zeros|ones)
        How to initialize the parameters (weights and biases).
    label_names: list of strings
        Names of the output labels, or None.
    input_names: list, optional
        Forwarded to ``utils.create_module`` as ``label_shapes``.

    Raises
    ------
    ValueError
        If ``mode`` is not one of 'random', 'zeros' or 'ones'.
    """
    mx.random.seed(1993)
    mod = utils.create_module(sym=net, data_shapes=data_shapes, label_shapes=input_names,
                              label_names=label_names)
    if mode == 'random':
        mod.init_params(
            initializer=mx.init.Uniform(scale=.1)
        )
    elif mode == 'zeros':
        mod.init_params(
            initializer=mx.init.Zero()
        )
    elif mode == 'ones':
        mod.init_params(
            initializer=mx.init.One()
        )
    else:
        # BUG FIX: the original built ``Exception(KeyError(...))`` without
        # raising it, so an invalid mode silently returned an uninitialized
        # module. Raise explicitly instead.
        raise ValueError("%s is not a valid initialization mode" % mode)
    return mod
class SingleLayerTest(unittest.TestCase):
"""
Unit test class for testing where converter is able to convert individual layers or not.
In order to do so, it converts model and generates preds on both CoreML and MXNet and check
they are the same.
"""
def _test_mxnet_model(self, net, input_shape, mode, class_labels=None,
                      coreml_mode=None, label_names=None, delta=1e-2,
                      pre_processing_args=None, input_name='data'):
    """ Helper that converts the MXNet model to CoreML and compares the
    predictions of the two over random data.

    Parameters
    ----------
    net: MXNet Symbol Graph
        The graph that we'll be converting into CoreML.
    input_shape: tuple of ints
        The shape of input data. Generally of the format (batch-size, channels, height, width)
    mode: (random|zeros|ones)
        The mode to use in order to set the parameters (weights and biases).
    label_names: list of strings
        The names of the output labels. Default: None
    delta: float
        The maximum difference b/w predictions of MXNet and CoreML that is tolerable.
    input_name: str
        The name of the input variable to the symbolic graph.
    """
    data_shapes = [(input_name, input_shape)]
    mod = _get_mxnet_module(net, data_shapes, mode, label_names)

    # Generate some dummy data and get the MXNet predictions
    input_data = {input_name: np.random.uniform(-10., 10., input_shape)}
    Batch = namedtuple('Batch', ['data'])
    mod.forward(Batch([mx.nd.array(input_data[input_name])]))
    mxnet_preds = mod.get_outputs()[0].asnumpy().flatten()

    # Get predictions from coreml
    coreml_model = convert(
        model=mod,
        class_labels=class_labels,
        mode=coreml_mode,
        input_shape={input_name: input_shape},
        preprocessor_args=pre_processing_args
    )
    # BUG FIX: ``dict.values()[0]`` is Python-2-only (a view is not
    # subscriptable on Python 3); wrap in ``list`` so the helper runs on both.
    coreml_preds = list(
        coreml_model.predict(_mxnet_remove_batch(input_data)).values()
    )[0].flatten()

    # Check prediction accuracy.
    # BUG FIX: use the canonical assertEqual/assertAlmostEqual names; the
    # "...Equals" spellings are deprecated aliases in unittest.
    self.assertEqual(len(mxnet_preds), len(coreml_preds))
    for i in range(len(mxnet_preds)):
        self.assertAlmostEqual(mxnet_preds[i], coreml_preds[i], delta=delta)
def test_tiny_inner_product_zero_input(self):
    # 10-unit input through a 5-unit FC layer, zero-initialised parameters
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    self._test_mxnet_model(net, input_shape=input_shape, mode='zeros')

def test_really_tiny_inner_product_ones_input(self):
    # 1x1 FC layer, all-ones parameters
    np.random.seed(1988)
    input_shape = (1, 1)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=1)
    self._test_mxnet_model(net, input_shape=input_shape, mode='ones')

def test_really_tiny_2_inner_product_ones_input(self):
    # scalar input fanned out to 5 hidden units, all-ones parameters
    np.random.seed(1988)
    input_shape = (1, 1)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    self._test_mxnet_model(net, input_shape=input_shape, mode='ones')

def test_tiny_inner_product_ones_input(self):
    # all-ones FC layer; looser delta because sums of ones amplify error
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    self._test_mxnet_model(net, input_shape=input_shape, mode='ones', delta=0.05)

def test_tiny_inner_product_random_input(self):
    # FC layer with randomly initialised parameters
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    self._test_mxnet_model(net, input_shape=input_shape, mode='random')

def test_tiny_inner_product_no_bias(self):
    # FC layer without a bias term
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5, no_bias=True)
    self._test_mxnet_model(net, input_shape=input_shape, mode='random')

def test_tiny_softmax_random_input(self):
    # FC layer followed by a softmax output (requires a label name)
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    net = mx.sym.SoftmaxOutput(net, name='softmax')
    self._test_mxnet_model(net, input_shape=input_shape, mode='random',
                           label_names=['softmax_label'])

def test_tiny_relu_activation_random_input(self):
    # FC layer followed by a ReLU activation
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    net = mx.sym.Activation(net, name='relu1', act_type="relu")
    self._test_mxnet_model(net, input_shape=input_shape, mode='random')

def test_tiny_sigmoid_activation_random_input(self):
    # FC layer followed by a sigmoid activation
    np.random.seed(1988)
    input_shape = (1, 10)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    net = mx.sym.Activation(net, name='sigmoid1', act_type="sigmoid")
    self._test_mxnet_model(net, input_shape=input_shape, mode='random')

def test_tiny_tanh_activation_random_input(self):
    np.random.seed(1988)
    input_shape = (1, 10)
    # Define a model: FC layer followed by a tanh activation
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
    net = mx.sym.Activation(net, name='tanh1', act_type="tanh")
    self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (1 ,1)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='ones', delta=0.05)
def test_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5 ,3)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_conv_random_asym_input(self):
np.random.seed(1988)
input_shape = (1, 1, 28, 18)
num_filter = 16
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
dilate = (1, 1)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1',
dilate=dilate)
net = mx.sym.Activation(net, name='tanh', act_type="tanh")
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_valid_pooling_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (2, 2)
stride = (2, 2)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='valid'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_pooling_full_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (2, 2)
stride = (2, 2)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='full'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_pooling_full_random_input_with_padding(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 2
kernel = (2, 2)
stride = (2, 2)
pad = (1, 1)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='full'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (5 ,5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_flatten(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.sym.Flatten(data=net, name='flatten1')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
self._test_mxnet_model(net, input_shape=input_shape, mode='random',
label_names=['softmax_label'])
def test_transpose(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.sym.transpose(data=net, name='transpose', axes=(0, 1, 2, 3))
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_reshape(self):
np.random.seed(1988)
input_shape = (1, 8)
net = mx.sym.Variable('data')
net = mx.sym.reshape(data=net, shape=(1, 2, 2, 2))
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_synset_random_input(self):
np.random.seed(1989)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
mod = _get_mxnet_module(net, data_shapes=[('data', input_shape)],
mode='random', label_names=['softmax_label'])
# Generate some dummy data
input_data = np.random.uniform(-0.1, 0.1, input_shape)
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.array(input_data)]))
kwargs = {'input_shape': {'data': input_shape}}
# Get predictions from coreml
coreml_model = convert(
model=mod,
class_labels=['Category1', 'Category2', 'Category3', 'Category4', 'Category5'],
mode='classifier',
**kwargs
)
prediction = coreml_model.predict(
_mxnet_remove_batch({'data': input_data}))
self.assertEqual(prediction['classLabel'], 'Category3')
def test_really_tiny_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='ones', delta=0.05)
def test_tiny_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_deconv_random_asym_input(self):
np.random.seed(1988)
input_shape = (1, 1, 28, 18)
num_filter = 16
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
dilate = (1, 1)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
dilate=dilate,
name='deconv_1'
)
net = mx.sym.Activation(net, name = 'tanh', act_type = "tanh")
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_pooling_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='max'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_deconv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_deconv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (5 ,5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random(self):
np.random.seed(1988)
input_shape = (1, 10, 4, 4)
num_filter = 3
kernel = (2, 2)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='deconv_1'
)
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_output_shape(self):
np.random.seed(1988)
input_shape = (1, 10, 4, 4)
num_filter = 3
kernel = (2, 2)
stride = (1, 1)
pad = (0, 0)
target_shape = (5, 5)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
target_shape=target_shape,
name='deconv_1'
)
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_padding(self):
np.random.seed(1988)
input_shape = (1, 10, 9, 9)
num_filter = 3
kernel = (3, 3)
stride = (3, 3)
pad = (2, 2)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='deconv_1')
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random_padding_odd(self):
np.random.seed(1988)
input_shape = (1, 10, 6, 6)
num_filter = 3
kernel = (5, 5)
stride = (1, 1)
pad = (3, 3)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random_padding_even(self):
np.random.seed(1988)
input_shape = (1, 10, 6, 6)
num_filter = 3
kernel = (5, 5)
stride = (1, 1)
pad = (2, 2)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_all_inputs(self):
np.random.seed(1988)
input_shape = (1, 10, 5, 5)
num_filter = 3
kernel = (3, 3)
stride = (2, 2)
pad = (1, 1)
dilate = (1, 1)
target_shape = (11, 11)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
target_shape=target_shape,
dilate=dilate,
name='deconv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_batch_norm(self):
np.random.seed(1988)
input_shape = (1, 1, 2, 3)
net = mx.sym.Variable('data')
gamma = mx.sym.Variable('gamma')
beta = mx.sym.Variable('beta')
moving_mean = mx.sym.Variable('moving_mean')
moving_var = mx.sym.Variable('moving_var')
net = mx.symbol.BatchNorm(
data=net,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
use_global_stats=True,
name='batch_norm_1')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', delta=1e-2)
def test_batch_norm_no_global_stats(self):
    """ Conversion with use_global_stats=False must raise: per the converter's
    contract, CoreML doesn't support local batch stats.
    """
    np.random.seed(1988)
    input_shape = (1, 1, 2, 3)
    net = mx.sym.Variable('data')
    gamma = mx.sym.Variable('gamma')
    beta = mx.sym.Variable('beta')
    moving_mean = mx.sym.Variable('moving_mean')
    moving_var = mx.sym.Variable('moving_var')
    net = mx.symbol.BatchNorm(
        data=net,
        gamma=gamma,
        beta=beta,
        moving_mean=moving_mean,
        moving_var=moving_var,
        use_global_stats=False,
        name='batch_norm_1')
    # BUG FIX: the docstring promised an exception, but the original body
    # called the helper directly, so the expected failure surfaced as a test
    # ERROR instead of a PASS. Assert the exception explicitly.
    with self.assertRaises(Exception):
        self._test_mxnet_model(net, input_shape=input_shape, mode='random', delta=1e-2)
def test_pre_processing_args(self):
    """FC + softmax network converted with image pre-processing arguments."""
    np.random.seed(1988)
    fc = mx.sym.FullyConnected(data=mx.sym.Variable('data'),
                               name='fc1', num_hidden=5)
    out = mx.sym.SoftmaxOutput(fc, name='softmax')
    # Neutral pre-processing: zero channel biases and a unit image scale.
    preprocess = {'red_bias': 0,
                  'blue_bias': 0,
                  'green_bias': 0,
                  'image_scale': 1}
    self._test_mxnet_model(out, input_shape=(1, 10), mode='random',
                           label_names=['softmax_label'],
                           pre_processing_args=preprocess)
def test_different_input_variables(self):
    """
    Verifying the behavior when input variable name is different than the
    standard name - 'data'.
    """
    np.random.seed(1988)
    out = mx.sym.FullyConnected(data=mx.sym.Variable('data1'),
                                name='fc1', num_hidden=5)
    self._test_mxnet_model(out, input_shape=(1, 10), mode='zeros',
                           input_name='data1')
def test_really_tiny_conv_optional_params(self):
    """
    Verifying the behavior of a convolutional layer when stride and pad
    are not provided.
    """
    np.random.seed(1988)
    # 1x1 convolution relying on Convolution's default stride/pad.
    conv = mx.symbol.Convolution(data=mx.sym.Variable('data'),
                                 num_filter=1,
                                 kernel=(1, 1),
                                 name='conv_1')
    self._test_mxnet_model(conv, input_shape=(1, 1, 10, 10), mode='random')
# TODO test_concat
if __name__ == '__main__':
    # Run every test in SingleLayerTest with verbose output.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(unittest.TestLoader().loadTestsFromTestCase(SingleLayerTest))
| {
"content_hash": "1ae23736e35f396ceafc2c422f955cc6",
"timestamp": "",
"source": "github",
"line_count": 973,
"max_line_length": 98,
"avg_line_length": 31.585817060637204,
"alnum_prop": 0.521882016073927,
"repo_name": "mlperf/training_results_v0.6",
"id": "5d26c5faf754371331bf247c5c79eb03665a19c9",
"size": "31519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/tools/coreml/test/test_mxnet_converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import migrations as schema
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class MigrationsClient(base_compute_client.BaseComputeClient):
    """Client for the Nova v2.1 ``os-migrations`` API."""

    def list_migrations(self, **params):
        """List all migrations.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#returnmigrations
        """
        url = 'os-migrations'
        if params:
            # Append any filter parameters as a query string.
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, raw = self.get(url)
        parsed = json.loads(raw)
        self.validate_response(schema.list_migrations, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)
| {
"content_hash": "92634d6770fd9e8954e9a8a834d721b9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 34.12,
"alnum_prop": 0.6764361078546307,
"repo_name": "HybridF5/tempest",
"id": "5eae8aaa85d8c55c578ddb8d2657e83c1d743f30",
"size": "1462",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/lib/services/compute/migrations_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3636851"
},
{
"name": "Shell",
"bytes": "8175"
}
],
"symlink_target": ""
} |
"""Prints the version of bob and exits
"""
def main():
    """Main routine, called by the script that gets the configuration of bob.blitz"""
    import bob.blitz
    # Print the build/config summary and report success to the shell.
    config = bob.blitz.get_config()
    print(config)
    return 0
| {
"content_hash": "d057725d126e5de43c542b1ab870e241",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 83,
"avg_line_length": 20.5,
"alnum_prop": 0.6878048780487804,
"repo_name": "tiagofrepereira2012/bob.extension",
"id": "c5efe3d1aad02442f6f5df98523ba180deb8a8be",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/bob.example.project/bob/example/project/script/version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "392"
},
{
"name": "C++",
"bytes": "49679"
},
{
"name": "Python",
"bytes": "181716"
},
{
"name": "Shell",
"bytes": "1695"
}
],
"symlink_target": ""
} |
"""
This module defines constants that are intended to be imported into the Flask
app configuration by default.
"""
# flask.config settings.
DEBUG = False
SECRET_KEY = 'A0gjhsd3678HK'
# Misc settings.
HOST = '0.0.0.0'
LOGGING_CONF = 'mongows/configs/logging.yaml'
MONGOHQ_URL = 'http://localhost:27017/mws'
NO_SAMPLE = False
PORT = 5000
| {
"content_hash": "976d8f87eeda6fa518ba9fa6bc544272",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 22.6,
"alnum_prop": 0.7345132743362832,
"repo_name": "mcomella/mongo-web-shell",
"id": "f5493ecf66a021371b5f5f0e5df8fd2472650dfc",
"size": "339",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mongows/configs/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "92711"
},
{
"name": "Python",
"bytes": "15214"
},
{
"name": "Shell",
"bytes": "648"
}
],
"symlink_target": ""
} |
"""
Tests specific to the lines module.
"""
from nose.tools import assert_true
from timeit import repeat
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, image_comparison
@cleanup
def test_invisible_Line_rendering():
    """
    Github issue #1256 identified a bug in Line.draw method

    Despite visibility attribute set to False, the draw method was not
    returning early enough and some pre-rendering code was executed
    though not necessary.

    Consequence was an excessive draw time for invisible Line instances
    holding a large number of points (Npts> 10**6)
    """
    # Big synthetic data set: ten million points.
    npts = 10 ** 7
    xdata = np.linspace(0, 1, npts)
    ydata = np.random.normal(size=npts)
    fig = plt.figure()
    ax = plt.subplot(111)
    # A huge Line2D that is explicitly hidden...
    big_line = mpl.lines.Line2D(xdata, ydata)
    big_line.set_visible(False)
    # ...but not yet attached to the axes, so drawing is fast here
    # (interactive panning/zooming is responsive at this point).
    baseline = min(repeat(fig.canvas.draw, number=1, repeat=3))
    # (gives about 25 ms)
    # Attach the invisible line and time the draw again: the hidden data
    # should not make rendering noticeably slower.
    ax.add_line(big_line)
    with_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
    # gives about 290 ms for N = 10**7 pts when the bug is present
    slowdown_factor = with_line / baseline
    slowdown_threshold = 2  # loose bound, trying to avoid false positives
    assert_true(slowdown_factor < slowdown_threshold)
@cleanup
def test_set_line_coll_dash():
    """Setting a dash-tuple linestyle on a contour LineCollection must not raise."""
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    np.random.seed(0)
    # Testing setting linestyles for line collections.
    # This should not produce an error; reaching the end of the function
    # is the assertion.  (Removed the unused `cs` binding and the vacuous
    # `assert True` from the original.)
    ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
@image_comparison(baseline_images=['line_collection_dashes'], remove_text=True)
def test_set_line_coll_dash_image():
    """Image comparison of dashed contour lines against the stored baseline."""
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    np.random.seed(0)
    # The decorator compares the rendered figure to the baseline image;
    # the return value of contour() was unused, so it is not bound.
    ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
| {
"content_hash": "324c83bbd7366ac607ced44262e3565e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 28.81578947368421,
"alnum_prop": 0.6748858447488585,
"repo_name": "RobertABT/heightmap",
"id": "6eefd1e5353df371e3463814c9017043c9ae2916",
"size": "2190",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/matplotlib/lib/matplotlib/tests/test_lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
} |
"""
lookupy.tests
~~~~~~~~~~~~~
This module contains tests for the lookupy module written using
nose to be run using::
$ nosetests -v
"""
import re
from nose.tools import assert_list_equal, assert_equal, assert_raises
from .lookupy import filter_items, lookup, include_keys, Q, QuerySet, \
Collection, LookupyError
from .dunderkey import dunderkey, dunder_partition, dunder_init, dunder_last, \
dunder_get, undunder_keys, dunder_truncate
# Shared fixture: three HAR-style request/response entries (one 404 and two
# 200s; two 'text/html' responses and one 'image/jpg').  All the lookup and
# filter tests below assert against these exact values.
entries_fixtures = [{'request': {'url': 'http://example.com', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]},
                     'response': {'status': 404, 'headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'},
                                                             {'name': 'Content-Type', 'value': 'text/html'}]}},
                    {'request': {'url': 'http://example.org', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]},
                     'response': {'status': 200, 'headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'},
                                                             {'name': 'Content-Type', 'value': 'text/html'}]}},
                    {'request': {'url': 'http://example.com/myphoto.jpg', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]},
                     'response': {'status': 200, 'headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'},
                                                             {'name': 'Content-Type', 'value': 'image/jpg'}]}}]
def fe(entries, *args, **kwargs):
    """Shorthand: run filter_items and materialise the result as a list."""
    return [item for item in filter_items(entries, *args, **kwargs)]
def ik(entries, fields):
    """Shorthand: run include_keys and materialise the result as a list."""
    return [item for item in include_keys(entries, fields)]
## Tests
def test_Collection():
    """A Collection can be iterated repeatedly over the same data."""
    coll = Collection(entries_fixtures)
    # Two consecutive iterations must yield identical items
    # (i.e. the collection is not a one-shot generator).
    assert_list_equal(list(coll), entries_fixtures)
    assert_list_equal(list(coll), entries_fixtures)
def test_Q():
    """Q objects: evaluation, negation, double negation, AND/OR composition."""
    data = entries_fixtures
    q_404_com = Q(response__status__exact=404, request__url__contains='.com')
    assert q_404_com.evaluate(data[0])
    # test with negation
    q_not_404 = ~Q(response__status__exact=404)
    assert q_not_404.evaluate(data[1])
    # negating twice flips the result back
    assert not (~q_not_404).evaluate(data[1])
    q_200 = Q(response__status=200)
    assert not (q_404_com & q_200).evaluate(data[0])
    assert (q_404_com | q_200).evaluate(data[0])
    assert (~(q_404_com & q_200)).evaluate(data[0])
    assert_list_equal(
        [(Q(request__url__endswith='.jpg') | Q(response__status=404)).evaluate(e)
         for e in data],
        [True, False, True])
    assert_list_equal(
        [(~Q(request__url__endswith='.jpg') | Q(response__status=404)).evaluate(e)
         for e in data],
        [True, True, False])
def test_lookup():
    """Exercise every lookup operator against the shared fixture entries.

    NOTE(review): keys written with a single underscore (e.g. 'request_url',
    'response_unknown') address a non-existent top-level key, so the
    looked-up value is None — presumably deliberate coverage of the
    missing-key path; confirm against lookup()'s contract.
    """
    entry1, entry2, entry3 = entries_fixtures
    # exact -- works for strings and int
    assert lookup('request__url__exact', 'http://example.com', entry1)
    assert not lookup('request_url__exact', 'http://example.org', entry1)
    assert lookup('response__status__exact', 404, entry1)
    assert not lookup('response__status__exact', 404, entry2)
    assert lookup('response_unknown__exact', None, entry1)
    # neq -- works for strings and ints
    assert not lookup('request__url__neq', 'http://example.com', entry1)
    assert lookup('request_url__neq', 'http://example.org', entry1)
    assert not lookup('response__status__neq', 404, entry1)
    assert lookup('response__status__neq', 404, entry2)
    assert not lookup('response_unknown__neq', None, entry1)
    # contains -- works for strings, else raises error
    assert lookup('request__url__contains', '.com', entry1)
    assert not lookup('request__url__contains', 'www', entry1)
    assert_raises(LookupyError, lookup, 'response__status__contains',
                  2, entry2)
    assert_raises(LookupyError, lookup, 'response__unknown__contains',
                  None, entry2)
    # icontains -- case-insensitive; works for strings, else raises error
    assert lookup('request__url__icontains', 'EXAMPLE', entry1)
    assert not lookup('request__url__icontains', 'www', entry1)
    assert_raises(LookupyError, lookup, 'response__status__icontains',
                  2, entry2)
    assert_raises(LookupyError, lookup,
                  'response__unknown__icontains', None, entry2)
    # in -- works for strings and lists, else raises error
    assert lookup('request__url__in', ['http://example.com',
                                       'http://blog.example.com'], entry1)
    assert lookup('response__status__in', [400, 200], entry2)
    assert not lookup('response__status__in', [], entry2)
    assert lookup('request__url__in', 'http://example.com/?q=hello', entry1)
    assert_raises(LookupyError, lookup, 'response__status__in', 404, entry1)
    # startswith -- works for strings, else raises error
    assert lookup('request__url__startswith', 'http://', entry1)
    assert not lookup('request__url__startswith', 'HTTP://', entry1)
    assert_raises(LookupyError, lookup,
                  'response__status__startswith', 4, entry1)
    # istartswith -- case-insensitive; works for strings, else raises error
    assert lookup('request__url__istartswith', 'http://', entry1)
    assert lookup('request__url__istartswith', 'HTTP://', entry1)
    assert_raises(LookupyError, lookup,
                  'response__status__istartswith', 4, entry1)
    # endswith -- works for strings, else raises error
    assert lookup('request__url__endswith', '.jpg', entry3)
    assert not lookup('request__url__endswith', '.JPG', entry3)
    assert_raises(LookupyError, lookup, 'response__status__endswith',
                  0, entry3)
    # iendswith -- case-insensitive; works for strings, else raises error
    assert lookup('request__url__iendswith', '.jpg', entry3)
    assert lookup('request__url__iendswith', '.JPG', entry3)
    assert_raises(LookupyError, lookup, 'response__status__iendswith',
                  0, entry3)
    # gt -- works for strings and int
    assert lookup('response__status__gt', 200, entry1)
    assert not lookup('response__status__gt', 404, entry1)
    assert lookup('request__url__gt', 'ftp://example.com', entry1)
    assert not lookup('request__url__gt', 'http://example.com', entry1)
    # gte -- works for strings and int
    assert lookup('response__status__gte', 200, entry1)
    assert lookup('response__status__gte', 404, entry1)
    assert lookup('request__url__gte', 'ftp://example.com', entry1)
    assert lookup('request__url__gte', 'http://example.com', entry1)
    # lt -- works for strings and int
    assert lookup('response__status__lt', 301, entry2)
    assert not lookup('response__status__lt', 200, entry2)
    assert lookup('request__url__lt', 'ws://example.com', entry2)
    assert not lookup('request__url__lt', 'http://example.org', entry2)
    # lte -- works for strings and int
    assert lookup('response__status__lte', 301, entry2)
    assert lookup('response__status__lte', 200, entry2)
    assert lookup('request__url__lte', 'ws://example.com', entry2)
    assert lookup('request__url__lte', 'http://example.org', entry2)
    # regex -- works for compiled patterns and strings
    pattern = r'^http:\/\/.+g$'
    assert lookup('request__url__regex', pattern, entry2)
    assert lookup('request__url__regex', pattern, entry3)
    assert not lookup('request__url__regex', pattern, entry1)
    compiled_pattern = re.compile(pattern)
    assert lookup('request__url__regex', compiled_pattern, entry2)
    assert lookup('request__url__regex', compiled_pattern, entry3)
    assert not lookup('request__url__regex', compiled_pattern, entry1)
    # filter -- works for Q objects, else raises error
    assert lookup('response__headers__filter',
                  Q(name__exact='Content-Type', value__exact='image/jpg'),
                  entry3)
    assert not lookup('response__headers__filter',
                      Q(name__exact='Content-Type', value__exact='text/html'),
                      entry3)
    assert_raises(LookupyError, lookup, 'response__headers__filter',
                  0, entry3)
    assert_raises(LookupyError, lookup, 'response__headers__filter',
                  "hello", entry3)
    assert_raises(LookupyError, lookup, 'response__headers__filter',
                  None, entry3)
    assert_raises(LookupyError, lookup, 'response__headers__filter',
                  {'a': 'b'}, entry3)
    assert_raises(LookupyError, lookup, 'response__status__filter',
                  Q(name__exact='Content-Type', value__exact='image/jpg'),
                  entry3)
    # nothing (no operator suffix) -- defaults to exact; works for strings and int
    assert lookup('request__url', 'http://example.com', entry1)
    assert not lookup('request_url', 'http://example.org', entry1)
    assert lookup('response__status', 404, entry1)
    assert not lookup('response__status', 404, entry2)
    assert lookup('response_unknown', None, entry1)
def test_filter_items():
    """filter_items: no-op without lookups, simple kwargs, compound Q objects."""
    entries = entries_fixtures
    # when no lookup kwargs passed, all entries are returned
    assert_list_equal(fe(entries), entries)
    # simple 1st level lookups
    assert_list_equal(fe(entries, request__url='http://example.com'), entries[0:1])
    assert_list_equal(fe(entries, response__status=200), entries[1:])
    assert len(fe(entries, response__status=405)) == 0
    # testing compound lookups (Q objects combined with | and ~, plus kwargs)
    assert len(fe(entries, Q(request__url__exact='http://example.org'))) == 1
    assert len(fe(entries,
                  Q(request__url__exact='http://example.org', response__status=200)
                  |
                  Q(request__url__endswith='.com', response__status=404))) == 2
    assert len(fe(entries,
                  ~Q(request__url__exact='http://example.org', response__status__gte=500)
                  |
                  Q(request__url__endswith='.com', response__status=404))) == 3
    # Q objects and plain kwargs may be mixed; kwargs are ANDed in.
    assert len(fe(entries,
                  ~Q(request__url__exact='http://example.org', response__status__gte=500)
                  |
                  Q(request__url__endswith='.com', response__status=404),
                  response__status__exact=200)) == 2
def test_include_keys():
    """include_keys projects entries onto the requested (dunder) fields."""
    entries = entries_fixtures
    assert_list_equal(ik(entries, ['request']),
                      [{'request': {'url': 'http://example.com', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]}},
                       {'request': {'url': 'http://example.org', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]}},
                       {'request': {'url': 'http://example.com/myphoto.jpg', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive'}]}}])
    # dunder keys select nested values and stay dunder-keyed in the result
    assert_list_equal(ik(entries, ['response__status']),
                      [{'response__status': 404},
                       {'response__status': 200},
                       {'response__status': 200}])
    # when an empty list is passed as fields
    assert_list_equal(ik(entries, []), [{},{},{}])
    # when a non-existent key is passed in fields, its value comes back as None
    assert_list_equal(ik(entries, ['response__status', 'cookies']),
                      [{'response__status': 404, 'cookies': None},
                       {'response__status': 200, 'cookies': None},
                       {'response__status': 200, 'cookies': None}])
def test_Collection_QuerySet():
    """End-to-end filter/select behaviour of Collection and QuerySet."""
    frameworks = [{'framework': 'Django', 'language': 'Python', 'type': 'full-stack'},
                  {'framework': 'Flask', 'language': 'Python', 'type': 'micro'},
                  {'framework': 'Rails', 'language': 'Ruby', 'type': 'full-stack'},
                  {'framework': 'Sinatra', 'language': 'Ruby', 'type': 'micro'},
                  {'framework': 'Zend', 'language': 'PHP', 'type': 'full-stack'},
                  {'framework': 'Slim', 'language': 'PHP', 'type': 'micro'}]
    coll = Collection(frameworks)
    # filter() returns a QuerySet
    starts_with_s = coll.filter(framework__startswith='S')
    assert isinstance(starts_with_s, QuerySet)
    assert len(list(starts_with_s)) == 2
    py_or_ruby = coll.filter(Q(language__exact='Python') | Q(language__exact='Ruby'))
    assert len(list(py_or_ruby)) == 4
    # select() projects each result onto the named fields
    php_only = coll.filter(language='PHP')
    assert_list_equal(list(php_only.select('framework', 'type')),
                      [{'framework': 'Zend', 'type': 'full-stack'},
                       {'framework': 'Slim', 'type': 'micro'}])
    py_or_ruby_again = coll.filter(Q(language__exact='Python') | Q(language__exact='Ruby'))
    assert_list_equal(list(py_or_ruby_again.select('framework')),
                      [{'framework': 'Django'},
                       {'framework': 'Flask'},
                       {'framework': 'Rails'},
                       {'framework': 'Sinatra'}])
    # :todo: test with flatten=True
    # unknown fields come back with a None value
    missing_key = coll.filter(framework__startswith='S').select('framework', 'somekey')
    assert_list_equal(list(missing_key),
                      [{'framework': 'Sinatra', 'somekey': None},
                       {'framework': 'Slim', 'somekey': None}])
## dunderkey (nested dict helper) tests
def test_dunderkey():
    """dunderkey joins its arguments with double underscores."""
    assert dunderkey('a') == 'a'
    assert dunderkey('a', 'b', 'c') == 'a__b__c'
    assert dunderkey('name', 'school_name') == 'name__school_name'
def test_dunder_partition():
    """dunder_partition splits at the LAST dunder; no dunder gives (key, None)."""
    assert dunder_partition('a') == ('a', None)
    assert dunder_partition('a__b') == ('a', 'b')
    assert dunder_partition('a__b__c') == ('a__b', 'c')
def test_dunder_init():
    """dunder_init returns everything before the last dunder (whole key if none)."""
    assert dunder_init('a') == 'a'
    assert dunder_init('a__b') == 'a'
    assert dunder_init('a__b__c') == 'a__b'
def test_dunder_last():
    """dunder_last returns the part after the last dunder (None if no dunder)."""
    assert dunder_last('a__b') == 'b'
    assert dunder_last('a__b__c') == 'c'
    assert dunder_last('a') is None
def test_dunder_get():
    """dunder_get resolves dunder-separated keys against nested dicts."""
    nested = {'a': 'A',
              'p': {'q': 'Q'},
              'x': {'y': {'z': 'Z'}}}
    assert dunder_get(nested, 'a') == 'A'
    assert dunder_get(nested, 'p__q') == 'Q'
    assert dunder_get(nested, 'x__y__z') == 'Z'
def test_undunder_keys():
    """undunder_keys expands flat dunder keys back into nested dicts."""
    entry = {'request__url': 'http://example.com', 'request__headers': [{'name': 'Connection', 'value': 'Keep-Alive',}],
             'response__status': 404, 'response__headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'}]}
    assert_equal(undunder_keys(entry),
                 {'request': {'url': 'http://example.com', 'headers': [{'name': 'Connection', 'value': 'Keep-Alive',}]},
                  'response': {'status': 404, 'headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'}]}})
def test_dunder_truncate():
    """dunder_truncate shortens keys to the last component when unambiguous.

    Keys whose truncated form would collide (both *__headers keys here)
    are left untouched.
    """
    entry = {'request__url': 'http://example.com', 'request__headers': [{'name': 'Connection', 'value': 'Keep-Alive',}],
             'response__status': 404, 'response__headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'}]}
    assert_equal(dunder_truncate(entry),
                 {'url': 'http://example.com',
                  'request__headers': [{'name': 'Connection', 'value': 'Keep-Alive',}],
                  'status': 404,
                  'response__headers': [{'name': 'Date', 'value': 'Thu, 13 Jun 2013 06:43:14 GMT'}]})
| {
"content_hash": "1f5fd506c193b7fdab037a201315c429",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 139,
"avg_line_length": 44.50149253731343,
"alnum_prop": 0.5738529648510866,
"repo_name": "naiquevin/lookupy",
"id": "1f15a091f08362e860f8344d2bcbb1c5f0e785a4",
"size": "14908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lookupy/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34735"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spmurraydev.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "585a38dfd5038cd1e88be234f8d8bee8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7155172413793104,
"repo_name": "hijonathan/spmurraydev.com",
"id": "7bc47c71d97e69bd4e87b38cb99e8f99e000774a",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import re
import uuid
import copy
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.version import version
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
    """
    Start a Java Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.

    **Example**: ::

        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date':
                (2016, 8, 1),
            'email': ['alex@vanboxel.be'],
            'email_on_failure': False,
            'email_on_retry': False,
            'retries': 1,
            'retry_delay': timedelta(minutes=30),
            'dataflow_default_options': {
                'project': 'my-gcp-project',
                'zone': 'us-central1-f',
                'stagingLocation': 'gs://bucket/tmp/dataflow/staging/',
            }
        }

        dag = DAG('test-dag', default_args=default_args)

        task = DataFlowJavaOperator(
            gcp_conn_id='gcp_default',
            task_id='normalize-cal',
            jar='{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar',
            options={
                'autoscalingAlgorithm': 'BASIC',
                'maxNumWorkers': '50',
                'start': '{{ds}}',
                'partitionType': 'DAY'
            },
            dag=dag)

    .. seealso::
        For more detail on job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params

    :param jar: The reference to a self executing DataFlow jar (templated).
    :type jar: str
    :param job_name: The 'jobName' to use when executing the DataFlow job
        (templated). This ends up being set in the pipeline options, so any entry
        with key ``'jobName'`` in ``options`` will be overwritten.
    :type job_name: str
    :param dataflow_default_options: Map of default job options.
    :type dataflow_default_options: dict
    :param options: Map of job specific options.
    :type options: dict
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :type poll_sleep: int
    :param job_class: The name of the dataflow job class to be executed, it
        is often not the main class configured in the dataflow jar file.
    :type job_class: str

    ``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.

    Note that both
    ``dataflow_default_options`` and ``options`` will be merged to specify pipeline
    execution parameter, and ``dataflow_default_options`` is expected to save
    high-level options, for instances, project and zone information, which
    apply to all dataflow operators in the DAG.

    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.

    .. code-block:: python

       default_args = {
           'dataflow_default_options': {
               'project': 'my-gcp-project',
               'zone': 'europe-west1-d',
               'stagingLocation': 'gs://my-staging-bucket/staging/'
           }
       }

    You need to pass the path to your dataflow as a file reference with the ``jar``
    parameter, the jar needs to be a self executing jar (see documentation here:
    https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
    Use ``options`` to pass on options to your job.

    .. code-block:: python

       t1 = DataFlowJavaOperator(
           task_id='datapflow_example',
           jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
           options={
               'autoscalingAlgorithm': 'BASIC',
               'maxNumWorkers': '50',
               'start': '{{ds}}',
               'partitionType': 'DAY',
               'labels': {'foo' : 'bar'}
           },
           gcp_conn_id='gcp-airflow-service-account',
           dag=my-dag)

    """
    template_fields = ['options', 'jar', 'job_name']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            jar,
            job_name='{{task.task_id}}',
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            poll_sleep=10,
            job_class=None,
            *args,
            **kwargs):
        super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
        dataflow_default_options = dataflow_default_options or {}
        # BUGFIX: copy the caller's ``options`` (and its nested ``labels``
        # dict) before injecting the airflow-version label.  The original
        # code mutated the dict passed in by the DAG author in place, which
        # leaked the label into any other operator sharing the same dict.
        options = dict(options or {})
        labels = dict(options.get('labels', {}))
        labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')
        options['labels'] = labels
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.jar = jar
        self.job_name = job_name
        self.dataflow_default_options = dataflow_default_options
        self.options = options
        self.poll_sleep = poll_sleep
        self.job_class = job_class

    def execute(self, context):
        """Stage the jar locally (if it lives in GCS) and launch the job."""
        # NOTE(review): GoogleCloudBucketHelper is not among this module's
        # visible imports -- presumably defined elsewhere in this file.
        bucket_helper = GoogleCloudBucketHelper(
            self.gcp_conn_id, self.delegate_to)
        self.jar = bucket_helper.google_cloud_to_local(self.jar)
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            poll_sleep=self.poll_sleep)
        # Task-level options override the DAG-wide defaults.
        dataflow_options = copy.copy(self.dataflow_default_options)
        dataflow_options.update(self.options)
        hook.start_java_dataflow(self.job_name, dataflow_options,
                                 self.jar, self.job_class)
class DataflowTemplateOperator(BaseOperator):
    """
    Start a Templated Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.

    :param template: The reference to the DataFlow template.
    :type template: str
    :param job_name: The 'jobName' to use when executing the DataFlow template
        (templated).
    :type job_name: str
    :param dataflow_default_options: Map of default job environment options.
    :type dataflow_default_options: dict
    :param parameters: Map of job specific parameters for the template.
    :type parameters: dict
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :type poll_sleep: int

    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.

    .. seealso::
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment

    .. code-block:: python

       default_args = {
           'dataflow_default_options': {
               'project': 'my-gcp-project',
               'region': 'europe-west1',
               'zone': 'europe-west1-d',
               'tempLocation': 'gs://my-staging-bucket/staging/',
           }
       }

    You need to pass the path to your dataflow template as a file reference with the
    ``template`` parameter. Use ``parameters`` to pass on parameters to your job.
    Use ``environment`` to pass on runtime environment variables to your job.

    .. code-block:: python

       t1 = DataflowTemplateOperator(
           task_id='datapflow_example',
           template='{{var.value.gcp_dataflow_base}}',
           parameters={
               'inputFile': "gs://bucket/input/my_input.txt",
               'outputFile': "gs://bucket/output/my_output.txt"
           },
           gcp_conn_id='gcp-airflow-service-account',
           dag=my-dag)

    ``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are
    templated so you can use variables in them.

    Note that ``dataflow_default_options`` is expected to save high-level options
    for project information, which apply to all dataflow operators in the DAG.

    .. seealso::
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3
        /LaunchTemplateParameters
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
        For more detail on job template execution have a look at the reference:
        https://cloud.google.com/dataflow/docs/templates/executing-templates
    """
    template_fields = ['parameters', 'dataflow_default_options', 'template', 'job_name']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            template,
            job_name='{{task.task_id}}',
            dataflow_default_options=None,
            parameters=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            poll_sleep=10,
            *args,
            **kwargs):
        super(DataflowTemplateOperator, self).__init__(*args, **kwargs)
        # Normalise optional mappings so downstream code can assume dicts.
        dataflow_default_options = dataflow_default_options or {}
        parameters = parameters or {}
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.dataflow_default_options = dataflow_default_options
        self.poll_sleep = poll_sleep
        self.template = template
        self.job_name = job_name
        self.parameters = parameters

    def execute(self, context):
        """Launch the templated Dataflow job via the DataFlowHook."""
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            poll_sleep=self.poll_sleep)
        hook.start_template_dataflow(self.job_name, self.dataflow_default_options,
                                     self.parameters, self.template)
class DataFlowPythonOperator(BaseOperator):
    """
    Launching Cloud Dataflow jobs written in python. Note that both
    dataflow_default_options and options will be merged to specify pipeline
    execution parameter, and dataflow_default_options is expected to save
    high-level options, for instances, project and zone information, which
    apply to all dataflow operators in the DAG.

    .. seealso::
        For more detail on job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params

    :param py_file: Reference to the python dataflow pipeline file.py, e.g.,
        /some/local/file/path/to/your/python/pipeline/file.
    :type py_file: str
    :param job_name: The 'job_name' to use when executing the DataFlow job
        (templated). This ends up being set in the pipeline options, so any entry
        with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
    :type job_name: str
    :param py_options: Additional python options, e.g., ["-m", "-v"].
    :type py_options: list[str]
    :param dataflow_default_options: Map of default job options.
    :type dataflow_default_options: dict
    :param options: Map of job specific options.
    :type options: dict
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :type poll_sleep: int
    """
    template_fields = ['options', 'dataflow_default_options', 'job_name']

    @apply_defaults
    def __init__(
            self,
            py_file,
            job_name='{{task.task_id}}',
            py_options=None,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            poll_sleep=10,
            *args,
            **kwargs):
        super(DataFlowPythonOperator, self).__init__(*args, **kwargs)

        self.py_file = py_file
        self.job_name = job_name
        self.py_options = py_options or []
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}
        # Tag every job with the Airflow version; Dataflow label values may
        # not contain '.' or '+', so replace them with '-'.
        self.options.setdefault('labels', {}).update(
            {'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.poll_sleep = poll_sleep

    def execute(self, context):
        """Execute the python dataflow job."""
        bucket_helper = GoogleCloudBucketHelper(
            self.gcp_conn_id, self.delegate_to)
        # If py_file lives in GCS, download a local copy first.
        self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            poll_sleep=self.poll_sleep)
        dataflow_options = self.dataflow_default_options.copy()
        dataflow_options.update(self.options)

        # Convert argument names from lowerCamelCase to snake case.
        def camel_to_snake(name):
            return re.sub(r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)

        formatted_options = {camel_to_snake(key): dataflow_options[key]
                             for key in dataflow_options}
        hook.start_python_dataflow(
            self.job_name, formatted_options,
            self.py_file, self.py_options)
class GoogleCloudBucketHelper(object):
    """GoogleCloudStorageHook helper class to download GCS object."""
    GCS_PREFIX_LENGTH = 5  # len('gs://')

    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None):
        self._gcs_hook = GoogleCloudStorageHook(gcp_conn_id, delegate_to)

    def google_cloud_to_local(self, file_name):
        """
        Checks whether the file specified by file_name is stored in Google Cloud
        Storage (GCS), if so, downloads the file and saves it locally. The full
        path of the saved file will be returned. Otherwise the local file_name
        will be returned immediately.

        :param file_name: The full path of input file.
        :type file_name: str
        :return: The full path of local file.
        :rtype: str
        """
        # Plain local paths are passed through untouched.
        if not file_name.startswith('gs://'):
            return file_name

        # Strip the 'gs://' prefix, then split the remainder on '/' to get
        # the bucket name and the object path.
        parts = file_name[self.GCS_PREFIX_LENGTH:].split('/')
        if len(parts) < 2:
            raise Exception(
                'Invalid Google Cloud Storage (GCS) object path: {}'
                .format(file_name))

        bucket_id = parts[0]
        object_id = '/'.join(parts[1:])
        # Randomized prefix avoids collisions between concurrent downloads.
        local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8],
                                                 parts[-1])
        self._gcs_hook.download(bucket_id, object_id, local_file)

        if os.stat(local_file).st_size > 0:
            return local_file
        raise Exception(
            'Failed to download Google Cloud Storage (GCS) object: {}'
            .format(file_name))
| {
"content_hash": "c0f187e6f19f520ccfbe800968fd7251",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 93,
"avg_line_length": 39.57248157248157,
"alnum_prop": 0.6103936421209487,
"repo_name": "fenglu-g/incubator-airflow",
"id": "e880642f6067c9315cf9da99697e7f42af60f06f",
"size": "16917",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/dataflow_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3634"
},
{
"name": "HTML",
"bytes": "129454"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5852162"
},
{
"name": "Shell",
"bytes": "41793"
}
],
"symlink_target": ""
} |
from mrcrowbar.colour import TEST_PALETTE
from mrcrowbar import models as mrc
from mrcrowbar.lib.images import base as img
class TIMFile( mrc.Block ):
    """A single file stored inside a TIM resource container."""

    # 13-byte, NUL-padded filename.
    file_name = mrc.Bytes( length=13 )
    size = mrc.UInt32_LE()
    # Raw file payload; length comes from the "size" field above.
    data = mrc.Bytes( length=mrc.Ref( "size" ) )

    @property
    def repr( self ):
        # Display the filename up to the first NUL terminator.
        return self.file_name.split( b"\x00" )[0].decode( "utf8" )
class ResourceTIM( mrc.Block ):
    """TIM resource blob; exposes its raw bytes through a Store."""

    raw_data = mrc.Bytes()

    def __init__( self, *args, **kwargs ):
        super().__init__( *args, **kwargs )
        # Store allows other blocks to dereference offsets into raw_data.
        self.store = mrc.Store( self, mrc.Ref( "raw_data" ) )
class TIMFileEntry( mrc.Block ):
    """Directory entry pointing at a TIMFile inside the resource store."""

    # Dereferences a TIMFile at "offset" within the parent's resource store.
    _file = mrc.StoreRef(
        TIMFile, mrc.Ref( "_parent._resource.store" ), mrc.Ref( "offset" )
    )
    name_hash = mrc.Int32_LE()
    offset = mrc.UInt32_LE()
class TIMFileStruct( mrc.Block ):
    """Directory table listing the entries stored for one file name."""

    _resource = None  # replace with the ResourceTIM object
    file_name = mrc.Bytes( length=13 )
    entry_count = mrc.UInt16_LE()
    entries = mrc.BlockField( TIMFileEntry, count=mrc.Ref( "entry_count" ) )
class ResourceMapTIM( mrc.Block ):
    """Top-level map of the files contained in a TIM resource."""

    hash_index = mrc.Bytes( length=4 )
    file_count = mrc.UInt16_LE()
    files = mrc.BlockField( TIMFileStruct, count=mrc.Ref( "file_count" ) )
class BitmapFrame( mrc.Block ):
    """One indexed-colour frame of a bitmap resource."""

    width = mrc.UInt16_LE()
    height = mrc.UInt16_LE()
    unk1 = mrc.UInt8()  # unknown byte -- TODO: identify
    size = mrc.UInt32_LE()
    image_data = mrc.Bytes( length=mrc.Ref( "size" ) )

    def __init__( self, *args, **kwargs ):
        super().__init__( *args, **kwargs )
        # Expose the raw bytes as an indexed image; the palette is looked up
        # two levels up on the containing block (its _palette attribute).
        self.image = img.IndexedImage(
            self,
            width=mrc.Ref( "width" ),
            height=mrc.Ref( "height" ),
            source=mrc.Ref( "image_data" ),
            palette=mrc.Ref( "_parent._parent._palette" ),
        )
class BitmapData( mrc.Block ):
    """Container of BitmapFrames addressed through a table of offsets."""

    unk1 = mrc.UInt16_LE()  # unknown field -- TODO: identify
    frame_count = mrc.UInt16_LE()
    frame_offsets = mrc.UInt32_LE( count=mrc.Ref( "frame_count" ) )
    raw_data = mrc.Bytes()

    @property
    def base_offset( self ):
        # Shift frame offsets back past the header fields; the extra 8 bytes
        # presumably account for an outer header -- TODO confirm.
        return -self.get_field_end_offset( "frame_offsets" ) - 8

    def __init__( self, *args, **kwargs ):
        # The store must exist before the base constructor parses the fields.
        self.store = mrc.LinearStore(
            parent=self,
            source=mrc.Ref( "raw_data" ),
            block_klass=BitmapFrame,
            offsets=mrc.Ref( "frame_offsets" ),
            base_offset=mrc.Ref( "base_offset" ),
        )
        super().__init__( *args, **kwargs )
class BitmapTIM( mrc.Block ):
    """Bitmap resource: b"BMP:" magic, payload size, then frame data."""

    magic = mrc.Const( mrc.Bytes( length=4 ), b"BMP:" )
    size = mrc.UInt32_LE()
    bitmap_data = mrc.BlockField( BitmapData, length=mrc.Ref( "size" ) )

    # replace this at load time
    _palette = TEST_PALETTE
| {
"content_hash": "130f44b734a946dd599330ed2c1374e4",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5859582542694497,
"repo_name": "moralrecordings/mrcrowbar",
"id": "887c9954d4cdf7583ae3001c503eef75ffb9a6c3",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrcrowbar/lib/games/sierra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "563354"
}
],
"symlink_target": ""
} |
"""
The project pages map for minimal_i18n
"""
from optimus.pages.views.base import PageViewBase
# from optimus.conf import settings
"""
Page objects
"""
class Index(PageViewBase):
    """
    Default index page
    """
    title = "My project"
    template_name = "index.html"
    # Default destination embeds the language code; can be overridden per
    # instance (see PAGES below).
    destination = "index_{language_code}.html"


# Enabled pages to build
PAGES = [
    Index(destination="index.html"),
    Index(lang="fr_FR"),
]
| {
"content_hash": "82dd376eb95066291067f438ca954707",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 49,
"avg_line_length": 16.46153846153846,
"alnum_prop": 0.6542056074766355,
"repo_name": "sveetch/Optimus",
"id": "b667e4537717165b73460eeac2f1a6215850ae07",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data_fixtures/minimal_i18n/pages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14380"
},
{
"name": "HTML",
"bytes": "16553"
},
{
"name": "JavaScript",
"bytes": "101904"
},
{
"name": "Makefile",
"bytes": "1564"
},
{
"name": "Python",
"bytes": "245913"
},
{
"name": "Ruby",
"bytes": "855"
},
{
"name": "Smarty",
"bytes": "8827"
}
],
"symlink_target": ""
} |
import datetime
import functools
import html
import logging
import os
import re
import time
import urllib
import urllib.parse
from collections import defaultdict, deque
from dataclasses import dataclass
from io import StringIO
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from typing.re import Match, Pattern
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
import ahocorasick
import dateutil.parser
import dateutil.tz
import markdown
import requests
from django.conf import settings
from django.db.models import Q
from hyperlink import parse
from markdown.extensions import codehilite, nl2br, sane_lists, tables
from typing_extensions import TypedDict
from zerver.lib import mention as mention
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.cache import NotFoundInCache, cache_with_key
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import (
codepoint_to_name,
emoticon_regex,
name_to_codepoint,
translate_emoticons,
)
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.mention import extract_user_group, possible_mentions, possible_user_group_mentions
from zerver.lib.tex import render_tex
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import TimeoutExpired, timeout
from zerver.lib.timezone import get_common_timezones
from zerver.lib.url_encoding import encode_stream, hash_util_encode
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
MAX_MESSAGE_LENGTH,
Message,
Realm,
UserGroup,
UserGroupMembership,
UserProfile,
all_realm_filters,
get_active_streams,
realm_filters_for_realm,
)
ReturnT = TypeVar('ReturnT')

def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
    '''
    Use this decorator with extreme caution.
    The function you wrap should have no dependency
    on any arguments (no args, no kwargs) nor should
    it depend on any global state.
    '''
    # Memoize the first non-None result. Note that a None result is treated
    # as "not computed yet", so a method returning None is called every time.
    cached = None

    def wrapper() -> ReturnT:
        nonlocal cached
        if cached is None:
            cached = method()
        return cached

    return wrapper
class FullNameInfo(TypedDict):
    # Minimal user record: database id, email address and display name.
    id: int
    email: str
    full_name: str

# Free-form per-render data bundle (keys include 'realm_uri'; see usages below).
DbData = Dict[str, Any]

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]

# !avatar(email) / !gravatar(email) syntax for showing a user's avatar inline.
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
# :emoji_name: syntax.
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
    """Compile ``pattern`` in re.VERBOSE mode, anchored to span the whole string."""
    anchored = "^(.*?)" + pattern + "(.*?)$"
    return re.compile(anchored, re.DOTALL | re.UNICODE | re.VERBOSE)
def normal_compile(pattern: str) -> Any:
    """Compile ``pattern`` anchored at both ends, with DOTALL matching."""
    anchored = "^(.*?)" + pattern + "(.*)$"
    return re.compile(anchored, re.DOTALL | re.UNICODE)
# Matches #**stream** syntax for linking to a stream.
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""

@one_time
def get_compiled_stream_link_regex() -> Pattern:
    # Compiled lazily; @one_time caches the compiled pattern for the process.
    return verbose_compile(STREAM_LINK_REGEX)
# Matches #**stream>topic** syntax for linking to a stream/topic pair.
STREAM_TOPIC_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*>]+) # stream name can contain anything except >
> # > acts as separator
(?P<topic_name>[^\*]+) # topic name can contain anything
\*\* # ends by double asterisks
"""

@one_time
def get_compiled_stream_topic_link_regex() -> Pattern:
    # Compiled lazily; @one_time caches the compiled pattern for the process.
    return verbose_compile(STREAM_TOPIC_LINK_REGEX)
# Lazily-built compiled regex for detecting URLs; see get_web_link_regex().
LINK_REGEX: Optional[Pattern] = None

def get_web_link_regex() -> Pattern:
    """Return the (cached) compiled regex that matches URLs in message text.

    NOTE(review): the original annotation said ``-> str``, but the function
    returns the compiled pattern produced by verbose_compile.
    """
    # We create this one time, but not at startup. So the
    # first message rendered in any process will have some
    # extra costs. It's roughly 75ms to run this code, so
    # caching the value in LINK_REGEX is super important here.
    global LINK_REGEX
    if LINK_REGEX is not None:
        return LINK_REGEX

    tlds = '|'.join(list_of_tlds())

    # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
    #
    # We detect a url either by the `https?://` or by building around the TLD.
    # In lieu of having a recursive regex (which python doesn't support) to match
    # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six
    # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
    # and the paren_group matches text with, optionally, a matching set of parens
    inner_paren_contents = r"[^\s()\"]*"
    paren_group = r"""
                    [^\s()\"]*?            # Containing characters that won't end the URL
                    (?: \( %s \)           # and more characters in matched parens
                        [^\s()\"]*?        # followed by more characters
                    )*                     # zero-or-more sets of paired parens
                   """
    nested_paren_chunk = paren_group
    for i in range(6):
        nested_paren_chunk = nested_paren_chunk % (paren_group,)
    nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
    file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
    REGEX = fr"""
        (?<![^\s'"\(,:<])    # Start after whitespace or specified chars
                             # (Double-negative lookbehind to allow start-of-string)
        (?P<url>             # Main group
            (?:(?:           # Domain part
                https?://[\w.:@-]+?   # If it has a protocol, anything goes.
               |(?:                   # Or, if not, be more strict to avoid false-positives
                    (?:[\w-]+\.)+     # One or more domain components, separated by dots
                    (?:{tlds})        # TLDs (filled in via format from tlds-alpha-by-domain.txt)
                )
            )
            (?:/             # A path, beginning with /
                {nested_paren_chunk}           # zero-to-6 sets of paired parens
            )?)              # Path is optional
            | (?:[\w.-]+\@[\w.-]+\.[\w]+)  # Email is separate, since it can't have a path
            {file_links}     # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
            | (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{{25,34}})  # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
        )
        (?=                            # URL must be followed by (not included in group)
            [!:;\?\),\.\'\"\>]*         # Optional punctuation characters
            (?:\Z|\s)                  # followed by whitespace or end of string
        )
        """
    LINK_REGEX = verbose_compile(REGEX)
    return LINK_REGEX
def clear_state_for_testing() -> None:
    """Reset the cached LINK_REGEX so it is rebuilt with current settings."""
    # The link regex never changes in production, but our tests
    # try out both sides of ENABLE_FILE_LINKS, so we need
    # a way to clear it.
    global LINK_REGEX
    LINK_REGEX = None
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
    """If the link points to a local destination (e.g. #narrow/...),
    generate a relative link that will open it in the current window.
    """
    if not db_data:
        return link

    prefix = db_data['realm_uri'] + "/"
    # The urljoin round-trip guards against prefix-lookalike URLs
    # (e.g. a different host that merely starts with the realm URI).
    points_home = (
        link.startswith(prefix)
        and urllib.parse.urljoin(prefix, link[len(prefix):]) == link
    )
    if points_home:
        return link[len(prefix):]
    return link
def url_embed_preview_enabled(message: Optional[Message]=None,
                              realm: Optional[Realm]=None,
                              no_previews: bool=False) -> bool:
    """Decide whether URL embed previews should be rendered for this message."""
    # Server-wide setting and per-call opt-out both short-circuit to False.
    if not settings.INLINE_URL_EMBED_PREVIEW or no_previews:
        return False

    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_url_embed_preview
def image_preview_enabled(message: Optional[Message]=None,
                          realm: Optional[Realm]=None,
                          no_previews: bool=False) -> bool:
    """Decide whether inline image previews should be rendered for this message."""
    # Server-wide setting and per-call opt-out both short-circuit to False.
    if not settings.INLINE_IMAGE_PREVIEW or no_previews:
        return False

    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_image_preview
def list_of_tlds() -> List[str]:
    """Return all known TLDs, longest first, for building the URL regex.

    Reads the bundled copy of IANA's tlds-alpha-by-domain.txt; comparison
    against the blacklist happens on the raw line (including the newline).
    """
    # HACK we manually blacklist a few domains
    blacklist = ['PY\n', "MD\n"]

    # tlds-alpha-by-domain.txt comes from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    # Use a context manager so the file handle is closed deterministically
    # (the previous code leaked an open file).
    with open(tlds_file) as f:
        tlds = [tld.lower().strip() for tld in f
                if tld not in blacklist and not tld.startswith('#')]
    # Longest TLDs first so regex alternation prefers the longest match.
    tlds.sort(key=len, reverse=True)
    return tlds
def walk_tree(root: Element,
              processor: Callable[[Element], Optional[_T]],
              stop_after_first: bool=False) -> List[_T]:
    """Breadth-first traversal of root's descendants, collecting non-None
    results of processor(child).

    Note that the root element itself is never passed to processor; only
    its descendants are visited.
    """
    results = []
    queue = deque([root])

    while queue:
        currElement = queue.popleft()
        for child in currElement:
            # NOTE: Element truthiness means "has child elements", so only
            # non-leaf children are queued for deeper traversal.
            if child:
                queue.append(child)

            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results
# Positional context of an element discovered during a tree walk.
@dataclass
class ElementFamily:
    grandparent: Optional[Element]  # None when the parent is the tree root
    parent: Element
    child: Element
    in_blockquote: bool  # True if any ancestor is a <blockquote>

T = TypeVar("T")
class ResultWithFamily(Generic[T]):
    """Pairs a processor result with the family of the element it came from."""
    family: ElementFamily
    result: T

    def __init__(self, family: ElementFamily, result: T):
        self.family = family
        self.result = result
class ElementPair:
    """A tree element linked to its parent pair, forming an ancestor chain."""
    parent: Optional["ElementPair"]  # None for the root of the walk
    value: Element

    def __init__(self, parent: Optional["ElementPair"], value: Element):
        self.parent = parent
        self.value = value
def walk_tree_with_family(root: Element,
                          processor: Callable[[Element], Optional[_T]],
                          ) -> List[ResultWithFamily[_T]]:
    """Like walk_tree, but each result is wrapped with its ElementFamily
    (grandparent/parent/child plus blockquote-ancestor flag)."""
    results = []

    queue = deque([ElementPair(parent=None, value=root)])
    while queue:
        currElementPair = queue.popleft()
        for child in currElementPair.value:
            # NOTE: Element truthiness means "has child elements"; only
            # non-leaf children are queued for deeper traversal.
            if child:
                queue.append(ElementPair(parent=currElementPair, value=child))
            result = processor(child)
            if result is not None:
                if currElementPair.parent is not None:
                    grandparent_element = currElementPair.parent
                    grandparent = grandparent_element.value
                else:
                    grandparent = None
                family = ElementFamily(
                    grandparent=grandparent,
                    parent=currElementPair.value,
                    child=child,
                    in_blockquote=has_blockquote_ancestor(currElementPair),
                )

                results.append(ResultWithFamily(
                    family=family,
                    result=result,
                ))

    return results
def has_blockquote_ancestor(element_pair: Optional[ElementPair]) -> bool:
    """Return True if any element on the parent chain is a <blockquote>."""
    # Iterative walk up the ancestor chain (equivalent to the recursive form).
    node = element_pair
    while node is not None:
        if node.value.tag == 'blockquote':
            return True
        node = node.parent
    return False
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
    """Fetch a tweet's data from the Twitter API (result cached by tweet_id).

    Returns None for permanent failures (so the None gets cached) and
    re-raises transient errors (timeouts, rate limits) so they are retried.
    """
    if settings.TEST_SUITE:
        # Tests use canned tweet data instead of hitting the network.
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        if not all(creds.values()):
            # Twitter integration is unconfigured; skip previews entirely.
            return None

        # We lazily import twitter here because its import process is
        # surprisingly slow, and doing so has a significant impact on
        # the startup performance of `manage.py` commands.
        import twitter

        try:
            api = twitter.Api(tweet_mode='extended', **creds)
            # Sometimes Twitter hangs on responses.  Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
        except AttributeError:
            bugdown_logger.error('Unable to load twitter api, you may have the wrong '
                                 'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                bugdown_logger.exception("Unknown error fetching tweet data")
                return None
    return res
# Tag-fragment matchers used by fetch_open_graph_image's hand-rolled scanner.
# Input is split on '<', so each fragment begins with the tag name.
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')

def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
    """Fetch url and extract Open Graph image/title/description from <head>.

    Returns None when the page can't be fetched or parsed, or has no og:image.
    """
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add
    # a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).text
    except Exception:
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break

        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False

        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True

    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        # No og:image means there is nothing useful to preview.
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: str) -> Optional[str]:
    """Extract the numeric tweet id from a twitter.com URL, or None."""
    parsed = urllib.parse.urlparse(url)
    host = parsed.netloc
    if host != 'twitter.com' and not host.endswith('.twitter.com'):
        return None

    # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
    # we need to look at the fragment instead
    candidate = parsed.path
    if parsed.path == '/' and len(parsed.fragment) > 5:
        candidate = parsed.fragment

    id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', candidate)
    return id_match.group("tweetid") if id_match else None
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    """Rewrite plain-http <img> sources to go through the camo proxy."""
    def run(self, root: Element) -> None:
        # Get all URLs from the blob
        found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
        for img in found_imgs:
            url = img.get("src")
            if urllib.parse.urlsplit(url).scheme != "http":
                # Don't rewrite images on our own site (e.g. emoji).
                continue
            img.set("src", get_camo_url(url))
class BacktickInlineProcessor(markdown.inlinepatterns.BacktickInlineProcessor):
    """ Return a `<code>` element containing the matching text. """
    def handleMatch(self, m: Match[str], data: str) -> Tuple[Union[None, Element], int, int]:
        # Let upstream's implementation do its job as it is, we'll
        # just replace the text to not strip the group because it
        # makes it impossible to put leading/trailing whitespace in
        # an inline code block.
        el, start, end = super().handleMatch(m, data)
        if m.group(3):
            # upstream's code here is: m.group(3).strip() rather than m.group(3).
            # Keeping the raw group preserves leading/trailing whitespace.
            el.text = markdown.util.AtomicString(markdown.util.code_escape(m.group(3)))
        return el, start, end
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that adds inline previews (images, tweets, embeds) for links."""
    # Limits applied when previewing links; enforced by methods of this class.
    TWITTER_MAX_IMAGE_HEIGHT = 400
    TWITTER_MAX_TO_PREVIEW = 3
    INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5

    def __init__(self, md: markdown.Markdown) -> None:
        markdown.treeprocessors.Treeprocessor.__init__(self, md)
    def add_a(
            self,
            root: Element,
            url: str,
            link: str,
            title: Optional[str]=None,
            desc: Optional[str]=None,
            class_attr: str="message_inline_image",
            data_id: Optional[str]=None,
            insertion_index: Optional[int]=None,
            already_thumbnailed: bool=False,
    ) -> None:
        """Append (or insert at insertion_index) a <div><a><img> preview
        subtree under root, with url as the image and link as the target."""
        desc = desc if desc is not None else ""

        # Update message.has_image attribute.
        if 'message_inline_image' in class_attr and self.md.zulip_message:
            self.md.zulip_message.has_image = True

        if insertion_index is not None:
            div = Element("div")
            root.insert(insertion_index, div)
        else:
            div = SubElement(root, "div")

        div.set("class", class_attr)
        a = SubElement(div, "a")
        a.set("href", link)
        if title is not None:
            a.set("title", title)
        if data_id is not None:
            a.set("data-id", data_id)
        img = SubElement(a, "img")
        if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
            # See docs/thumbnailing.md for some high-level documentation.
            #
            # We strip leading '/' from relative URLs here to ensure
            # consistency in what gets passed to /thumbnail
            url = url.lstrip('/')
            img.set("src", "/thumbnail?url={}&size=thumbnail".format(
                urllib.parse.quote(url, safe=''),
            ))
            img.set('data-src-fullsize', "/thumbnail?url={}&size=full".format(
                urllib.parse.quote(url, safe=''),
            ))
        else:
            img.set("src", url)

        if class_attr == "message_inline_ref":
            summary_div = SubElement(div, "div")
            title_div = SubElement(summary_div, "div")
            title_div.set("class", "message_inline_image_title")
            title_div.text = title
            desc_div = SubElement(summary_div, "desc")
            desc_div.set("class", "message_inline_image_desc")
    def add_oembed_data(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> bool:
        """Render an oEmbed photo or video preview; return True if one was added."""
        oembed_resource_type = extracted_data.get('type', '')
        title = extracted_data.get('title')

        if oembed_resource_type == 'photo':
            image = extracted_data.get('image')
            if image:
                self.add_a(root, image, link, title=title)
                return True

        elif oembed_resource_type == 'video':
            # NOTE: this local name shadows the module-level `html` import.
            html = extracted_data['html']
            image = extracted_data['image']
            title = extracted_data.get('title')
            description = extracted_data.get('description')
            # `html` is passed positionally as add_a's data_id argument.
            self.add_a(root, image, link, title, description,
                       "embed-video message_inline_image",
                       html, already_thumbnailed=True)
            return True

        return False
    def add_embed(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
        """Append a rich "message_embed" card (image, title, description) for link."""
        oembed = extracted_data.get('oembed', False)
        if oembed and self.add_oembed_data(root, link, extracted_data):
            # oEmbed rendering already handled this link.
            return

        img_link = extracted_data.get('image')
        if not img_link:
            # Don't add an embed if an image is not found
            return

        container = SubElement(root, "div")
        container.set("class", "message_embed")

        parsed_img_link = urllib.parse.urlparse(img_link)
        # Append domain where relative img_link url is given
        if not parsed_img_link.netloc:
            parsed_url = urllib.parse.urlparse(link)
            domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
            img_link = urllib.parse.urljoin(domain, img_link)
        img = SubElement(container, "a")
        img.set("style", "background-image: url(" + img_link + ")")
        img.set("href", link)
        img.set("class", "message_embed_image")

        data_container = SubElement(container, "div")
        data_container.set("class", "data-container")

        title = extracted_data.get('title')
        if title:
            title_elm = SubElement(data_container, "div")
            title_elm.set("class", "message_embed_title")
            a = SubElement(title_elm, "a")
            a.set("href", link)
            a.set("title", title)
            a.text = title
        description = extracted_data.get('description')
        if description:
            description_elm = SubElement(data_container, "div")
            description_elm.set("class", "message_embed_description")
            description_elm.text = description
def get_actual_image_url(self, url: str) -> str:
# Add specific per-site cases to convert image-preview urls to image urls.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
# https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split('/')
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin('https://raw.githubusercontent.com',
'/'.join(split_path[0:3] + split_path[4:]))
return url
def is_image(self, url: str) -> bool:
if not self.md.image_preview_enabled:
return False
parsed_url = urllib.parse.urlparse(url)
# remove html urls which end with img extensions that can not be shorted
if parsed_url.netloc == 'pasteboard.co':
return False
# List from https://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
for ext in [".bmp", ".gif", ".jpe", "jpeg", ".jpg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
    def corrected_image_source(self, url: str) -> Optional[str]:
        """Return a corrected direct-image URL for known hosts, else None.

        NOTE(review): the original return annotation was ``str``, but the
        fall-through path returns None.
        """
        # This function adjusts any urls from linx.li and
        # wikipedia.org to point to the actual image url. It's
        # structurally very similar to dropbox_image, and possibly
        # should be rewritten to use open graph, but has some value.
        parsed_url = urllib.parse.urlparse(url)
        if parsed_url.netloc.lower().endswith('.wikipedia.org'):
            # Redirecting from "/wiki/File:" to "/wiki/Special:FilePath/File:"
            # A possible alternative, that avoids the redirect after hitting "Special:"
            # is using the first characters of md5($filename) to generate the url
            domain = parsed_url.scheme + "://" + parsed_url.netloc
            correct_url = domain + parsed_url.path[:6] + 'Special:FilePath' + parsed_url.path[5:]
            return correct_url
        if parsed_url.netloc == 'linx.li':
            return 'https://linx.li/s' + parsed_url.path
        return None
    def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
        """Build preview info for Dropbox share links, or None if not applicable.

        Returned dict has an 'is_image' flag and, when available, Open Graph
        fields such as 'image'.
        """
        # TODO: The returned Dict could possibly be a TypedDict in future.
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
            is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
            # Only allow preview Dropbox shared links
            if not (parsed_url.path.startswith('/s/') or
                    parsed_url.path.startswith('/sh/') or
                    is_album):
                return None

            # Try to retrieve open graph protocol info for a preview
            # This might be redundant right now for shared links for images.
            # However, we might want to make use of title and description
            # in the future. If the actual image is too big, we might also
            # want to use the open graph image.
            image_info = fetch_open_graph_image(url)

            is_image = is_album or self.is_image(url)

            # If it is from an album or not an actual image file,
            # just use open graph image.
            if is_album or not is_image:
                # Failed to follow link to find an image preview so
                # use placeholder image and guess filename
                if image_info is None:
                    return None

                image_info["is_image"] = is_image
                return image_info

            # Otherwise, try to retrieve the actual image.
            # This is because open graph image from Dropbox may have padding
            # and gifs do not work.
            # TODO: What if image is huge? Should we get headers first?
            if image_info is None:
                image_info = dict()
            image_info['is_image'] = True
            parsed_url_list = list(parsed_url)
            # Force a direct download of the file by rewriting the query.
            parsed_url_list[4] = "dl=1"  # Replaces query
            image_info["image"] = urllib.parse.urlunparse(parsed_url_list)

            return image_info
        return None
def youtube_id(self, url: str) -> Optional[str]:
    """Extract the video id from a YouTube URL, or return None."""
    if not self.md.image_preview_enabled:
        return None
    # Video id extraction regular expression from https://pastebin.com/KyKAFv1s
    # Slightly modified to support URLs of the forms
    #   - youtu.be/<id>
    #   - youtube.com/playlist?v=<id>&list=<list-id>
    #   - youtube.com/watch_videos?video_ids=<id1>,<id2>,<id3>
    # On a successful match, group(2) holds the video id.
    scheme = r'(?:https?://)'
    host = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)'
    params = (r'(?:(?:(?:v|embed)/)|'
              r'(?:(?:(?:watch|playlist)(?:_popup|_videos)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v(?:ideo_ids)?=))')
    video_id = r'([0-9A-Za-z_-]+)'
    pattern = f'^({scheme}?{host}{params}?)?{video_id}(?(1).+)?$'
    match = re.match(pattern, url)
    # URLs like youtube.com/playlist?list=<list-id> would otherwise
    # capture the literal word "playlist" as the id; reject those.
    if match is None or match.group(2) == 'playlist':
        return None
    return match.group(2)
def youtube_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
    """Build the display title for a YouTube preview, if one is known."""
    if extracted_data.get("title") is None:
        return None
    return "YouTube - {}".format(extracted_data["title"])
def youtube_image(self, url: str) -> Optional[str]:
    """Return the default thumbnail URL for a YouTube link, or None."""
    video_id = self.youtube_id(url)
    if video_id is None:
        return None
    return f"https://i.ytimg.com/vi/{video_id}/default.jpg"
def vimeo_id(self, url: str) -> Optional[str]:
    """Extract the numeric video id from a Vimeo URL, or return None.

    Handles plain video URLs as well as channel and group video URLs;
    on a match, group(5) is the video id.
    """
    if not self.md.image_preview_enabled:
        return None
    # (http|https)?:\/\/(www\.)?vimeo\.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
    # Bug fix: the "." in "vimeo.com" is now escaped, so hosts such as
    # "vimeoXcom" no longer match.
    vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo\.com\/' + \
               r'(?:channels\/(?:\w+\/)?|groups\/' + \
               r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
    match = re.match(vimeo_re, url)
    if match is None:
        return None
    return match.group(5)
def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
    """Build the display title for a Vimeo preview, if one is known."""
    title = extracted_data.get("title")
    return None if title is None else f"Vimeo - {title}"
def twitter_text(self, text: str,
                 urls: List[Dict[str, str]],
                 user_mentions: List[Dict[str, Any]],
                 media: List[Dict[str, Any]]) -> Element:
    """
    Use data from the twitter API to turn links, mentions and media into A
    tags. Also convert unicode emojis to images.

    This works by using the urls, user_mentions and media data from
    the twitter API and searching for unicode emojis in the text using
    `unicode_emoji_regex`.

    The first step is finding the locations of the URLs, mentions, media and
    emoji in the text. For each match we build a dictionary with type, the
    start location, end location, the URL to link to, and the text (codepoint
    and title in case of emojis) to be used in the link (image in case of
    emojis).

    Next we sort the matches by start location. And for each we add the
    text from the end of the last link to the start of the current link to
    the output. The text needs to be added to the text attribute of the first
    node (the P tag) or the tail of the last link created.

    Finally we add any remaining text to the last node.
    """

    to_process: List[Dict[str, Any]] = []
    # Build dicts for URLs
    for url_data in urls:
        short_url = url_data["url"]
        full_url = url_data["expanded_url"]
        for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
            to_process.append({
                'type': 'url',
                'start': match.start(),
                'end': match.end(),
                'url': short_url,
                'text': full_url,
            })
    # Build dicts for mentions
    for user_mention in user_mentions:
        screen_name = user_mention['screen_name']
        mention_string = '@' + screen_name
        for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
            to_process.append({
                'type': 'mention',
                'start': match.start(),
                'end': match.end(),
                'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
                'text': mention_string,
            })
    # Build dicts for media
    for media_item in media:
        short_url = media_item['url']
        expanded_url = media_item['expanded_url']
        for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
            to_process.append({
                'type': 'media',
                'start': match.start(),
                'end': match.end(),
                'url': short_url,
                'text': expanded_url,
            })
    # Build dicts for emojis
    for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
        orig_syntax = match.group('syntax')
        codepoint = unicode_emoji_to_codepoint(orig_syntax)
        if codepoint in codepoint_to_name:
            display_string = ':' + codepoint_to_name[codepoint] + ':'
            to_process.append({
                'type': 'emoji',
                'start': match.start(),
                'end': match.end(),
                'codepoint': codepoint,
                'title': display_string,
            })

    to_process.sort(key=lambda x: x['start'])
    p = current_node = Element('p')

    def set_text(text: str) -> None:
        """
        Helper to set the text or the tail of the current_node.
        (Reads `current_node` from the enclosing scope, which is
        rebound as links are appended below.)
        """
        if current_node == p:
            current_node.text = text
        else:
            current_node.tail = text

    db_data = self.md.zulip_db_data
    current_index = 0
    for item in to_process:
        # The text we want to link starts inside already-linked text; skip it
        if item['start'] < current_index:
            continue
        # Add text from the end of last link to the start of the current
        # link
        set_text(text[current_index:item['start']])
        current_index = item['end']
        if item['type'] != 'emoji':
            elem = url_to_a(db_data, item['url'], item['text'])
            assert isinstance(elem, Element)
        else:
            elem = make_emoji(item['codepoint'], item['title'])
        current_node = elem
        p.append(elem)

    # Add any unused text
    set_text(text[current_index:])
    return p
def twitter_link(self, url: str) -> Optional[Element]:
    """Build the rendered tweet preview <div> for a twitter URL.

    Returns None when the URL is not a tweet, the tweet is unknown, or
    fetching tweet data fails for any reason.
    """
    tweet_id = get_tweet_id(url)

    if tweet_id is None:
        return None

    try:
        res = fetch_tweet_data(tweet_id)
        if res is None:
            return None
        user: Dict[str, Any] = res['user']
        tweet = Element("div")
        tweet.set("class", "twitter-tweet")
        img_a = SubElement(tweet, 'a')
        img_a.set("href", url)
        profile_img = SubElement(img_a, 'img')
        profile_img.set('class', 'twitter-avatar')
        # For some reason, for, e.g. tweet 285072525413724161,
        # python-twitter does not give us a
        # profile_image_url_https, but instead puts that URL in
        # profile_image_url. So use _https if available, but fall
        # back gracefully.
        image_url = user.get('profile_image_url_https', user['profile_image_url'])
        profile_img.set('src', image_url)

        text = html.unescape(res['full_text'])
        urls = res.get('urls', [])
        user_mentions = res.get('user_mentions', [])
        media: List[Dict[str, Any]] = res.get('media', [])
        p = self.twitter_text(text, urls, user_mentions, media)
        tweet.append(p)

        span = SubElement(tweet, 'span')
        span.text = "- {} (@{})".format(user['name'], user['screen_name'])

        # Add image previews
        for media_item in media:
            # Only photos have a preview image
            if media_item['type'] != 'photo':
                continue

            # Find the image size that is smaller than
            # TWITTER_MAX_IMAGE_HEIGHT px tall, or else the smallest
            # available (the loop leaves `size_name` at the last entry
            # when nothing is under the limit).
            size_name_tuples = list(media_item['sizes'].items())
            size_name_tuples.sort(reverse=True,
                                  key=lambda x: x[1]['h'])
            for size_name, size in size_name_tuples:
                if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
                    break

            # NOTE(review): the ':<size_name>' suffix is presumably
            # Twitter's size-variant URL syntax — confirm against the API.
            media_url = '{}:{}'.format(media_item['media_url_https'], size_name)
            img_div = SubElement(tweet, 'div')
            img_div.set('class', 'twitter-image')
            img_a = SubElement(img_div, 'a')
            img_a.set('href', media_item['url'])
            img = SubElement(img_a, 'img')
            img.set('src', media_url)

        return tweet
    except Exception:
        # We put this in its own try-except because it requires external
        # connectivity. If Twitter flakes out, we don't want to not-render
        # the entire message; we just want to not show the Twitter preview.
        bugdown_logger.warning("Error building Twitter link", exc_info=True)
        return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
    """Return (href, link text) for an <a> element, else None."""
    if e.tag != "a":
        return None
    return (e.get("href"), e.text)
def handle_image_inlining(
    self,
    root: Element,
    found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> None:
    """Insert an inline image preview for `found_url` and, when the
    link was the sole content of its paragraph or list item, remove
    the now-redundant textual link."""
    grandparent = found_url.family.grandparent
    parent = found_url.family.parent
    ahref_element = found_url.family.child
    (url, text) = found_url.result
    actual_url = self.get_actual_image_url(url)

    # url != text usually implies a named link, which we opt not to remove
    url_eq_text = text is None or url == text
    title = None if url_eq_text else text

    if parent.tag == 'li':
        self.add_a(parent, self.get_actual_image_url(url), url, title=title)
        if not parent.text and not ahref_element.tail and url_eq_text:
            parent.remove(ahref_element)
    elif parent.tag == 'p':
        # Find parent's position among grandparent's children so the
        # preview can be inserted right after the paragraph.
        parent_index = None
        for index, uncle in enumerate(grandparent):
            if uncle is parent:
                parent_index = index
                break

        if parent_index is not None:
            ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
            self.add_a(grandparent, actual_url, url, title=title, insertion_index=ins_index)
        else:
            # We're not inserting after parent, since parent not found.
            # Append to end of list of grandparent's children as normal
            self.add_a(grandparent, actual_url, url, title=title)

        # If link is alone in a paragraph, delete paragraph containing it
        if (len(parent) == 1 and
                (not parent.text or parent.text == "\n") and
                not ahref_element.tail and
                url_eq_text):
            grandparent.remove(parent)
    else:
        # If none of the above criteria match, fall back to old behavior
        self.add_a(root, actual_url, url, title=title)
def find_proper_insertion_index(self, grandparent: Element, parent: Element,
                                parent_index_in_grandparent: int) -> int:
    """Return the child index of `grandparent` at which a new inline
    image preview for a link in `parent` should be inserted.

    If there are several inline images from the same paragraph, ensure
    that they stay in correct (and not opposite) order by inserting
    after the last inline image already emitted for `parent`.
    """
    parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
    insertion_index = parent_index_in_grandparent

    while True:
        insertion_index += 1
        if insertion_index >= len(grandparent):
            return insertion_index

        uncle = grandparent[insertion_index]
        inline_image_classes = ['message_inline_image', 'message_inline_ref']
        # Stop as soon as the sibling is not an inline-image div...
        if (
            uncle.tag != 'div' or
            'class' not in uncle.keys() or
            uncle.attrib['class'] not in inline_image_classes
        ):
            return insertion_index

        # ...or is an inline image that belongs to a different paragraph.
        uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
        if uncle_link not in parent_links:
            return insertion_index
def is_absolute_url(self, url: str) -> bool:
    """True when the URL carries an explicit network location (host)."""
    parsed = urllib.parse.urlparse(url)
    return parsed.netloc != ''
def run(self, root: Element) -> None:
    """Walk the rendered message tree and add inline previews (images,
    Dropbox links, tweets, YouTube thumbnails, and generic URL embeds)
    for the links found in it."""
    # Get all URLs from the blob
    found_urls = walk_tree_with_family(root, self.get_url_data)
    unique_urls = {found_url.result[0] for found_url in found_urls}
    # Collect unique URLs which are not quoted as we don't do
    # inline previews for links inside blockquotes.
    unique_previewable_urls = {found_url.result[0] for found_url in found_urls
                               if not found_url.family.in_blockquote}

    # Set has_link and similar flags whenever a message is processed by bugdown
    if self.md.zulip_message:
        self.md.zulip_message.has_link = len(found_urls) > 0
        self.md.zulip_message.has_image = False  # This is updated in self.add_a
        self.md.zulip_message.potential_attachment_path_ids = []

        for url in unique_urls:
            # Due to rewrite_local_links_to_relative, we need to
            # handle both relative URLs beginning with
            # `/user_uploads` and beginning with `user_uploads`.
            # This urllib construction converts the latter into
            # the former.
            parsed_url = urllib.parse.urlsplit(urllib.parse.urljoin("/", url))
            host = parsed_url.netloc

            # Only uploads hosted on this realm count as attachments.
            if host != '' and host != self.md.zulip_realm.host:
                continue

            if not parsed_url.path.startswith("/user_uploads/"):
                continue

            path_id = parsed_url.path[len("/user_uploads/"):]
            self.md.zulip_message.potential_attachment_path_ids.append(path_id)

    if len(found_urls) == 0:
        return

    if len(unique_previewable_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
        # Too many links; skip previews entirely rather than bloat the message.
        return

    processed_urls: Set[str] = set()
    rendered_tweet_count = 0

    for found_url in found_urls:
        (url, text) = found_url.result

        # Preview each previewable URL at most once per message.
        if url in unique_previewable_urls and url not in processed_urls:
            processed_urls.add(url)
        else:
            continue

        if not self.is_absolute_url(url):
            if self.is_image(url):
                self.handle_image_inlining(root, found_url)
            # We don't have a strong use case for doing url preview for relative links.
            continue

        dropbox_image = self.dropbox_image(url)
        if dropbox_image is not None:
            class_attr = "message_inline_ref"
            is_image = dropbox_image["is_image"]
            if is_image:
                class_attr = "message_inline_image"
                # Not making use of title and description of images
            self.add_a(root, dropbox_image['image'], url,
                       title=dropbox_image.get('title'),
                       desc=dropbox_image.get('desc', ""),
                       class_attr=class_attr,
                       already_thumbnailed=True)
            continue

        if self.is_image(url):
            image_source = self.corrected_image_source(url)
            if image_source is not None:
                found_url = ResultWithFamily(
                    family=found_url.family,
                    result=(image_source, image_source),
                )
            self.handle_image_inlining(root, found_url)
            continue

        if get_tweet_id(url) is not None:
            # Only render at most TWITTER_MAX_TO_PREVIEW tweets per message.
            if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
                continue

            twitter_data = self.twitter_link(url)
            if twitter_data is None:
                # This link is not actually a tweet known to twitter
                continue

            rendered_tweet_count += 1
            div = SubElement(root, "div")
            div.set("class", "inline-preview-twitter")
            div.insert(0, twitter_data)
            continue

        youtube = self.youtube_image(url)
        if youtube is not None:
            yt_id = self.youtube_id(url)
            self.add_a(root, youtube, url, None, None,
                       "youtube-video message_inline_image",
                       yt_id, already_thumbnailed=True)
            # NOTE: We don't `continue` here, to allow replacing the URL with
            # the title, if INLINE_URL_EMBED_PREVIEW feature is enabled.
            # The entire preview would ideally be shown only if the feature
            # is enabled, but URL previews are a beta feature and YouTube
            # previews are pretty stable.

        db_data = self.md.zulip_db_data
        if db_data and db_data['sent_by_bot']:
            # Bot messages don't get generic URL embed previews.
            continue

        if not self.md.url_embed_preview_enabled:
            continue

        try:
            extracted_data = link_preview.link_embed_data_from_cache(url)
        except NotFoundInCache:
            # No cached embed data; record the URL so it can be fetched
            # out of band (presumably triggering a later re-render).
            self.md.zulip_message.links_for_preview.add(url)
            continue

        if extracted_data:
            if youtube is not None:
                title = self.youtube_title(extracted_data)
                if title is not None:
                    found_url.family.child.text = title
                continue
            self.add_embed(root, url, extracted_data)
            if self.vimeo_id(url):
                title = self.vimeo_title(extracted_data)
                if title:
                    found_url.family.child.text = title
class Avatar(markdown.inlinepatterns.Pattern):
    """Render avatar syntax (an email capture group) as an <img> tag."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        img = Element('img')
        email_address = match.group('email')
        email = email_address.strip().lower()
        profile_id = None

        # Prefer a stable /avatar/<user_id> URL when the email resolves
        # to a known user; otherwise fall back to /avatar/<email>.
        db_data = self.md.zulip_db_data
        if db_data is not None:
            user_dict = db_data['email_info'].get(email)
            if user_dict is not None:
                profile_id = user_dict['id']

        img.set('class', 'message_body_gravatar')
        img.set('src', f'/avatar/{profile_id or email}?s=30')
        img.set('title', email)
        img.set('alt', email)
        return img
def possible_avatar_emails(content: str) -> Set[str]:
    """Collect every email referenced by avatar/gravatar syntax in content."""
    found: Set[str] = set()
    for regex in (AVATAR_REGEX, GRAVATAR_REGEX):
        # re.findall yields the email capture group; skip empty matches.
        found.update(email for email in re.findall(regex, content) if email)
    return found
class Timestamp(markdown.inlinepatterns.Pattern):
    """Render timestamp syntax as an HTML5 <time> element, or an error
    <span> when the captured time string cannot be parsed."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        time_input_string = match.group('time')
        timestamp = None
        try:
            # First try a human-readable datetime, accepting common
            # timezone abbreviations.
            timestamp = dateutil.parser.parse(time_input_string, tzinfos=get_common_timezones())
        except ValueError:
            try:
                # Fall back to treating the input as a raw UNIX timestamp.
                # Besides ValueError for non-numeric input,
                # fromtimestamp() raises OverflowError/OSError for values
                # outside the platform-supported range; treat those as
                # invalid input rather than crashing the renderer.
                timestamp = datetime.datetime.fromtimestamp(float(time_input_string))
            except (ValueError, OverflowError, OSError):
                pass

        if not timestamp:
            error_element = Element('span')
            error_element.set('class', 'timestamp-error')
            error_element.text = markdown.util.AtomicString(
                f"Invalid time format: {time_input_string}")
            return error_element

        # Use HTML5 <time> element for valid timestamps.
        time_element = Element('time')
        if timestamp.tzinfo:
            timestamp = timestamp.astimezone(datetime.timezone.utc)
        else:
            # Naive datetimes are taken to already be UTC.
            timestamp = timestamp.replace(tzinfo=datetime.timezone.utc)
        time_element.set('datetime', timestamp.isoformat().replace('+00:00', 'Z'))
        # Set text to initial input, so simple clients translating
        # HTML to text will at least display something.
        time_element.text = markdown.util.AtomicString(time_input_string)
        return time_element
# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
# Note: the character class below coalesces adjacent blocks from the
# list above into single ranges (e.g. \u2300-\u27BF covers five blocks).
unicode_emoji_regex = '(?P<syntax>['\
    '\U0001F100-\U0001F64F' \
    '\U0001F680-\U0001F6FF' \
    '\U0001F900-\U0001F9FF' \
    '\u2000-\u206F' \
    '\u2300-\u27BF' \
    '\u2900-\u297F' \
    '\u2B00-\u2BFF' \
    '\u3000-\u303F' \
    '\u3200-\u32FF' \
    '])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
    """Build the <span> element used to render a unicode emoji."""
    # The tooltip is the emoji name without the surrounding colons and
    # with underscores shown as spaces.
    title = display_string[1:-1].replace("_", " ")
    span = Element('span')
    for attr, value in (
        ('class', f'emoji emoji-{codepoint}'),
        ('title', title),
        ('role', 'img'),
        ('aria-label', title),
    ):
        span.set(attr, value)
    span.text = markdown.util.AtomicString(display_string)
    return span
def make_realm_emoji(src: str, display_string: str) -> Element:
    """Build the <img> element used to render a realm (custom) emoji."""
    # The tooltip is the emoji name without the surrounding colons and
    # with underscores shown as spaces.
    tooltip = display_string[1:-1].replace("_", " ")
    image = Element('img')
    attributes = {
        'src': src,
        'class': 'emoji',
        'alt': display_string,
        'title': tooltip,
    }
    for key, value in attributes.items():
        image.set(key, value)
    return image
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
    """Return the lowercase hex codepoint for a single emoji character.

    The result is zero-padded on the left to a minimum of 4 hex digits
    (e.g. '\\u00a9' -> '00a9', '\\U0001f600' -> '1f600'), so lookups in
    codepoint_to_name line up.
    """
    # zfill replaces the original manual pad loop (whose comment also
    # had a typo: codepoints shorter than *four* digits get padded).
    return format(ord(unicode_emoji), 'x').zfill(4)
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
    """ Translates emoticons like `:)` into emoji like `:smile:`. """
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        db_data = self.md.zulip_db_data
        # Only translate when the translate_emoticons setting is on for
        # this message.
        if db_data is None or not db_data['translate_emoticons']:
            return None

        emoticon = match.group('emoticon')
        translated = translate_emoticons(emoticon)
        # `translated` has the form ':name:'; strip the colons to get
        # the name for the codepoint lookup.
        name = translated[1:-1]
        return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
    """Replace raw unicode emoji characters with rendered emoji spans."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        orig_syntax = match.group('syntax')
        codepoint = unicode_emoji_to_codepoint(orig_syntax)
        if codepoint in codepoint_to_name:
            display_string = ':' + codepoint_to_name[codepoint] + ':'
            return make_emoji(codepoint, display_string)
        else:
            # Unknown codepoint: leave the character as plain text.
            return None
class Emoji(markdown.inlinepatterns.Pattern):
    """Render :emoji_name: syntax, preferring realm (custom) emoji over
    builtin unicode emoji."""
    # Note: the return annotation was Optional[Element], but the final
    # branch returns the original string so the literal text survives.
    def handleMatch(self, match: Match[str]) -> Union[Element, str]:
        orig_syntax = match.group("syntax")
        # Strip the surrounding colons to get the emoji name.
        name = orig_syntax[1:-1]

        active_realm_emoji: Dict[str, Dict[str, str]] = {}
        db_data = self.md.zulip_db_data
        if db_data is not None:
            active_realm_emoji = db_data['active_realm_emoji']

        if self.md.zulip_message and name in active_realm_emoji:
            return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
        elif name == 'zulip':
            # The zulip emoji is always available, independent of realm emoji.
            return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
        elif name in name_to_codepoint:
            return make_emoji(name_to_codepoint[name], orig_syntax)
        else:
            return orig_syntax
def content_has_emoji_syntax(content: str) -> bool:
    """True when the message contains at least one :emoji_name: token."""
    return bool(re.search(EMOJI_REGEX, content))
class Tex(markdown.inlinepatterns.Pattern):
    """Render inline TeX via render_tex; on failure, emit the raw TeX
    source wrapped in an error span."""
    def handleMatch(self, match: Match[str]) -> Element:
        rendered = render_tex(match.group('body'), is_inline=True)
        if rendered is not None:
            # We need to give Python-Markdown an ElementTree object, but if we
            # give it one with correctly stored XML namespaces, it will mangle
            # everything when serializing it. So we play this stupid game to
            # store xmlns as a normal attribute. :-[
            assert ' zulip-xmlns="' not in rendered
            rendered = rendered.replace(' xmlns="', ' zulip-xmlns="')
            parsed = etree.iterparse(StringIO(rendered))
            for event, elem in parsed:
                if 'zulip-xmlns' in elem.attrib:
                    elem.attrib['xmlns'] = elem.attrib.pop('zulip-xmlns')
                # The last element produced by iterparse is the root.
                root = elem
            return root
        else:  # Something went wrong while rendering
            span = Element('span')
            span.set('class', 'tex-error')
            # Show the raw source so the user can see what failed.
            span.text = '$$' + match.group('body') + '$$'
            return span
def sanitize_url(url: str) -> Optional[str]:
    """
    Sanitize a URL against XSS attacks: return None to reject the URL,
    '' when it cannot even be parsed, or the (possibly rewritten) URL.

    See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
    """
    try:
        scheme, netloc, path, params, query, fragment = \
            urllib.parse.urlparse(url.replace(' ', '%20'))
    except ValueError:
        # Bad url - so bad it couldn't be parsed.
        return ''

    no_scheme_or_host = (scheme == '' and netloc == '')
    if no_scheme_or_host and '@' in path:
        # No scheme/host but an '@' in the path: treat as an email
        # address and give it the mailto: scheme.
        scheme = 'mailto'
    elif no_scheme_or_host and path.startswith('/'):
        # Allow domain-relative links
        return urllib.parse.urlunparse(('', '', path, params, query, fragment))
    elif no_scheme_or_host and not path and not params and not query and fragment:
        # Allow fragment links
        return urllib.parse.urlunparse(('', '', '', '', '', fragment))

    # Zulip modification: If scheme is not specified, assume http://
    # We re-enter sanitize_url because netloc etc. need to be re-parsed.
    if not scheme:
        return sanitize_url('http://' + url)

    if netloc == '' and scheme not in ('mailto', 'news', 'file', 'bitcoin'):
        # A host-less URL whose scheme requires a host can never be
        # valid; reject it without further processing.
        return None

    # Upstream code will accept a URL like javascript://foo because it
    # appears to have a netloc.  Additionally there are plenty of other
    # schemes that do weird things like launch external programs.  To be
    # on the safe side, we whitelist the scheme.
    if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
        return None

    # Upstream code scans path, parameters, and query for colon
    # characters because some aliases [for javascript:] appear to
    # urllib.parse to have no scheme.  We already converted an empty
    # scheme to http:// above, so we skip that colon check, which would
    # also forbid a lot of legitimate URLs.

    # Url passes all tests. Return url as-is.
    return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
    """Turn a URL into an <a> element, or return the raw URL string when
    it fails sanitization (so it renders as plain text)."""
    href = sanitize_url(url)
    if href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url
    if text is None:
        text = markdown.util.AtomicString(url)

    anchor = Element('a')
    anchor.set('href', rewrite_local_links_to_relative(db_data, href))
    anchor.text = text
    return anchor
class CompiledPattern(markdown.inlinepatterns.Pattern):
    """A markdown inline Pattern constructed from an already-compiled regex."""
    def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
        # This is similar to the superclass's small __init__ function,
        # but we skip the compilation step and let the caller give us
        # a compiled regex.
        self.compiled_re = compiled_re
        self.md = md
class AutoLink(CompiledPattern):
    """Linkify bare URLs matched in the message text."""
    def handleMatch(self, match: Match[str]) -> ElementStringNone:
        url = match.group('url')
        db_data = self.md.zulip_db_data
        return url_to_a(db_data, url)
class OListProcessor(sane_lists.SaneOListProcessor):
    """ Ordered lists, but with 2-space indent """
    def __init__(self, parser: Any) -> None:
        # HACK: temporarily set tab_length to 2 while the superclass
        # initializes (it reads tab_length then), so ordered lists use
        # 2-space indentation; restore the default of 4 afterwards.
        parser.md.tab_length = 2
        super().__init__(parser)
        parser.md.tab_length = 4
class UListProcessor(sane_lists.SaneUListProcessor):
    """ Unordered lists, but with 2-space indent """
    def __init__(self, parser: Any) -> None:
        # HACK: temporarily set tab_length to 2 while the superclass
        # initializes (it reads tab_length then), so bulleted lists use
        # 2-space indentation; restore the default of 4 afterwards.
        parser.md.tab_length = 2
        super().__init__(parser)
        parser.md.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
    """ Process unordered list blocks.

    Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
    """

    def __init__(self, parser: Any) -> None:
        # HACK: Set the tab length to 2 just for the initialization of
        # this class, so that bulleted lists (and only bulleted lists)
        # work off 2-space indentation.
        parser.md.tab_length = 2
        super().__init__(parser)
        parser.md.tab_length = 4
class HashHeaderProcessor(markdown.blockprocessors.HashHeaderProcessor):
    """ Process Hash Headers.

    Based on markdown.blockprocessors.HashHeaderProcessor, but requires space for heading.
    """
    # Original regex for hashheader is
    # RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
    # The added `\s` requires whitespace after the #s, so text such as
    # "#streamname" is not mistaken for a heading.
    RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})\s(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
    """ Process BlockQuotes.

    Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
    """

    # Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
    # NOTE(review): the added negative lookahead appears to skip over runs
    # of empty '>' lines before matching — confirm against upstream intent.
    RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
                    r'[ ]{0,3}>[ ]?(.*)')

    # Pattern for user mentions, used below to silence them in quotes.
    mention_re = re.compile(mention.find_mentions)

    def clean(self, line: str) -> str:
        # Silence all the mentions inside blockquotes by prefixing '_'
        # (the silent-mention marker).
        line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line)

        # And then run the upstream processor's code for removing the '>'
        return super().clean(line)
@dataclass
class Fence:
    """A fenced block that is currently open while scanning lines."""
    # The exact delimiter string that opened the fence (as matched by
    # FENCE_RE); a closing line must repeat it to pop this entry.
    fence_str: str
    # True for code fences; False for quote/quoted fences, whose
    # contents are still treated as normal message text.
    is_code: bool
class BugdownListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows list blocks that come directly after another block
    to be rendered as a list.

    Detects paragraphs that have a matching list item that comes
    directly after a line of text, and inserts a newline between
    to satisfy Markdown"""

    # Matches a list item line; group(1) is the marker ('*', '+', '-',
    # or a digit followed by '.').
    LI_RE = re.compile(r'^[ ]*([*+-]|\d\.)[ ]+(.*)', re.MULTILINE)

    def run(self, lines: List[str]) -> List[str]:
        """ Insert a newline between a paragraph and ulist if missing """
        inserts = 0
        in_code_fence: bool = False
        open_fences: List[Fence] = []
        copy = lines[:]
        for i in range(len(lines) - 1):
            # Ignore anything that is inside a fenced code block but not quoted.
            # We ignore all lines where some parent is a non quote code block.
            m = FENCE_RE.match(lines[i])
            if m:
                fence_str = m.group('fence')
                is_code = not m.group('lang') in ('quote', 'quoted')
                has_open_fences = not len(open_fences) == 0
                matches_last_fence = fence_str == open_fences[-1].fence_str if has_open_fences else False
                # A fence line with no language that repeats the last
                # opener closes it; anything else opens a new fence.
                closes_last_fence = not m.group('lang') and matches_last_fence

                if closes_last_fence:
                    open_fences.pop()
                else:
                    open_fences.append(Fence(fence_str, is_code))

                in_code_fence = any([fence.is_code for fence in open_fences])

            # If we're not in a fenced block and we detect an upcoming list
            # hanging off any block (including a list of another type), add
            # a newline.
            li1 = self.LI_RE.match(lines[i])
            li2 = self.LI_RE.match(lines[i+1])
            if not in_code_fence and lines[i]:
                # len(group(1)) == 1 distinguishes bullet markers from
                # numbered ones ('1.'), so switching list type also
                # triggers an inserted blank line.
                if (li2 and not li1) or (li1 and li2 and
                                         (len(li1.group(1)) == 1) != (len(li2.group(1)) == 1)):
                    copy.insert(i+inserts+1, '')
                    inserts += 1
        return copy
# Name for the outer capture group we use to separate whitespace and
# other delimiters from the actual content. This value won't be an
# option in user-entered capture groups.
OUTER_CAPTURE_GROUP = "linkifier_actual_match"

def prepare_realm_pattern(source: str) -> str:
    """Augment a realm filter so it only matches after start-of-string,
    whitespace, or opening delimiters, won't match if there are word
    characters directly after, and saves what was matched as
    OUTER_CAPTURE_GROUP."""
    # Lookbehind: previous char (if any) must be whitespace or an
    # opening delimiter; lookahead: no word character directly after.
    prefix = r"""(?<![^\s'"\(,:<])"""
    suffix = r"(?!\w)"
    return f"{prefix}(?P<{OUTER_CAPTURE_GROUP}>{source}){suffix}"
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Apply a given realm filter to the input """
    def __init__(self, source_pattern: str,
                 format_string: str,
                 markdown_instance: Optional[markdown.Markdown]=None) -> None:
        # Wrap the configured pattern so it matches only at word-ish
        # boundaries and captures the whole match as OUTER_CAPTURE_GROUP.
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)

    def handleMatch(self, m: Match[str]) -> Union[Element, str]:
        db_data = self.md.zulip_db_data
        # The URL is the format string %-interpolated with the match's
        # named groups; the link text is the raw matched substring.
        return url_to_a(db_data,
                        self.format_string % m.groupdict(),
                        m.group(OUTER_CAPTURE_GROUP))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
    """Render @**name** (and silent @_**name**) user mentions as styled
    spans, and record the mentioned user ids on the message."""
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group('match')
        silent = m.group('silent') == '_'

        db_data = self.md.zulip_db_data
        if self.md.zulip_message and db_data is not None:
            if match.startswith("**") and match.endswith("**"):
                name = match[2:-2]
            else:
                # Not syntactically a mention.
                return None

            wildcard = mention.user_mention_matches_wildcard(name)

            # Support the "name|id" disambiguation syntax: look up by id
            # when present, otherwise by full name.  (Local renamed from
            # `id`, which shadowed the builtin.)
            id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
            if id_syntax_match:
                mentioned_user_id = id_syntax_match.group("user_id")
                user = db_data['mention_data'].get_user_by_id(mentioned_user_id)
            else:
                user = db_data['mention_data'].get_user_by_name(name)

            if wildcard:
                self.md.zulip_message.mentions_wildcard = True
                user_id = "*"
            elif user:
                # Silent mentions don't notify, so don't record them.
                if not silent:
                    self.md.zulip_message.mentions_user_ids.add(user['id'])
                name = user['full_name']
                user_id = str(user['id'])
            else:
                # Don't highlight @mentions that don't refer to a valid user
                return None

            el = Element("span")
            el.set('data-user-id', user_id)
            text = f"{name}"
            if silent:
                el.set('class', 'user-mention silent')
            else:
                el.set('class', 'user-mention')
                text = f"@{text}"
            el.text = markdown.util.AtomicString(text)
            return el
        return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
    """Render user-group mentions as styled spans, and record the
    mentioned group ids on the message."""
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group(2)
        db_data = self.md.zulip_db_data

        if self.md.zulip_message and db_data is not None:
            name = extract_user_group(match)
            user_group = db_data['mention_data'].get_user_group(name)
            if user_group:
                self.md.zulip_message.mentions_user_group_ids.add(user_group.id)
                name = user_group.name
                user_group_id = str(user_group.id)
            else:
                # Don't highlight @-mentions that don't refer to a valid user
                # group.
                return None

            el = Element("span")
            el.set('class', 'user-group-mention')
            el.set('data-user-group-id', user_group_id)
            text = f"@{name}"
            el.text = markdown.util.AtomicString(text)
            return el
        return None
class StreamPattern(CompiledPattern):
    """Render stream references as links to the stream's narrow."""
    # Note: the parameter was annotated Match[str], but it is used as a
    # plain string key, and callers pass m.group(...).
    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
        db_data = self.md.zulip_db_data
        if db_data is None:
            # No per-message data: cannot resolve stream names.
            return None
        stream = db_data['stream_names'].get(name)
        return stream

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        name = m.group('stream_name')

        if self.md.zulip_message:
            stream = self.find_stream_by_name(name)
            if stream is None:
                return None
            el = Element('a')
            el.set('class', 'stream')
            el.set('data-stream-id', str(stream['id']))
            # TODO: We should quite possibly not be specifying the
            # href here and instead having the browser auto-add the
            # href when it processes a message with one of these, to
            # provide more clarity to API clients.
            # Also do the same for StreamTopicPattern.
            stream_url = encode_stream(stream['id'], name)
            el.set('href', f'/#narrow/stream/{stream_url}')
            text = f'#{name}'
            el.text = markdown.util.AtomicString(text)
            return el
        return None
class StreamTopicPattern(CompiledPattern):
    """Render stream>topic references as links to the topic's narrow."""
    # Note: the parameter was annotated Match[str], but it is used as a
    # plain string key, and callers pass m.group(...).
    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
        db_data = self.md.zulip_db_data
        if db_data is None:
            # No per-message data: cannot resolve stream names.
            return None
        stream = db_data['stream_names'].get(name)
        return stream

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        stream_name = m.group('stream_name')
        topic_name = m.group('topic_name')

        if self.md.zulip_message:
            stream = self.find_stream_by_name(stream_name)
            if stream is None or topic_name is None:
                return None
            el = Element('a')
            el.set('class', 'stream-topic')
            el.set('data-stream-id', str(stream['id']))
            stream_url = encode_stream(stream['id'], stream_name)
            topic_url = hash_util_encode(topic_name)
            link = f'/#narrow/stream/{stream_url}/topic/{topic_url}'
            el.set('href', link)
            text = f'#{stream_name} > {topic_name}'
            el.text = markdown.util.AtomicString(text)
            return el
        return None
def possible_linked_stream_names(content: str) -> Set[str]:
    """Return every stream name that #stream or #stream>topic syntax in
    ``content`` might refer to (an overestimate; no escaping analysis)."""
    names = set(re.findall(STREAM_LINK_REGEX, content, re.VERBOSE))
    names.update(
        match.group('stream_name')
        for match in re.finditer(STREAM_TOPIC_LINK_REGEX, content, re.VERBOSE)
    )
    return names
class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
    """Detect realm alert words in the message and record which users
    should be notified; performs no rendering changes."""

    # Characters that may legitimately precede/follow an alert word for it
    # to count as a standalone occurrence.
    allowed_before_punctuation = {' ', '\n', '(', '"', '.', ',', '\'', ';', '[', '*', '`', '>'}
    allowed_after_punctuation = {' ', '\n', ')', '",', '?', ':', '.', ',', '\'', ';', ']', '!',
                                 '*', '`'}

    def check_valid_start_position(self, content: str, index: int) -> bool:
        # index <= 0 means the word starts at (or before) the beginning.
        return index <= 0 or content[index] in self.allowed_before_punctuation

    def check_valid_end_position(self, content: str, index: int) -> bool:
        # index >= len(content) means the word ends the message.
        return index >= len(content) or content[index] in self.allowed_after_punctuation

    def run(self, lines: Iterable[str]) -> Iterable[str]:
        # We check for alert words here, the set of which are
        # dependent on which users may see this message.
        #
        # Our caller passes in the list of possible_words. We
        # don't do any special rendering; we just append the alert words
        # we find to the set self.md.zulip_message.alert_words.
        db_data = self.md.zulip_db_data
        if not self.md.zulip_message or db_data is None:
            return lines

        automaton = db_data['realm_alert_words_automaton']
        if automaton is None:
            return lines

        content = '\n'.join(lines).lower()
        for end_index, (original_value, user_ids) in automaton.iter(content):
            starts_ok = self.check_valid_start_position(
                content, end_index - len(original_value))
            ends_ok = self.check_valid_end_position(content, end_index + 1)
            if starts_ok and ends_ok:
                self.md.zulip_message.user_ids_with_alert_words.update(user_ids)
        return lines
class LinkInlineProcessor(markdown.inlinepatterns.LinkInlineProcessor):
    """Upstream link processor plus Zulip's URL sanitizing and rewriting."""

    def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
        # Sanitize url or don't parse link. See linkify_tests in markdown_test_cases for banned syntax.
        cleaned_href = sanitize_url(self.unescape(el.get('href').strip()))
        if cleaned_href is None:
            return None  # no-op; the link is not processed.

        # Rewrite local links to be relative
        cleaned_href = rewrite_local_links_to_relative(self.md.zulip_db_data, cleaned_href)
        el.set("href", cleaned_href)

        # Show link href if title is empty
        if not el.text.strip():
            el.text = cleaned_href

        # Prevent realm_filters from running on the content of a Markdown link, breaking up the link.
        # This is a monkey-patch, but it might be worth sending a version of this change upstream.
        el.text = markdown.util.AtomicString(el.text)
        return el

    def handleMatch(self, m: Match[str], data: str) -> Tuple[Union[None, Element], int, int]:
        element, start, end = super().handleMatch(m, data)
        if element is not None:
            element = self.zulip_specific_link_changes(element)
        return element, start, end
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
    """Return a fresh Registry containing only the named entries of ``r``,
    each at its original priority.

    Registry is a new class added by py-markdown to replace Ordered List.
    Since Registry doesn't support .keys(), it is easier to make a new
    object instead of removing keys from the existing object.
    """
    sub_registry = markdown.util.Registry()
    for key in keys:
        sub_registry.register(r[key], key, r.get_index_for_name(key))
    return sub_registry
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1  # engine used when no realm is known (e.g. docs rendering)
ZEPHYR_MIRROR_BUGDOWN_KEY = -2  # restricted engine for mirrored Zephyr traffic
class Bugdown(markdown.Markdown):
    """Zulip's Markdown engine: python-markdown configured with a curated
    subset of upstream processors plus Zulip-specific syntax (mentions,
    stream links, emoji, tex, realm filters, etc.).

    Constructed via build_engine(); keyword arguments must include
    'realm_filters', 'realm' and 'code_block_processor_disabled'.
    """
    def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
        # define default configs
        self.config = {
            "realm_filters": [kwargs['realm_filters'],
                              "Realm-specific filters for realm_filters_key {}".format(kwargs['realm'])],
            "realm": [kwargs['realm'], "Realm id"],
            "code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
                                              "Disabled for email gateway"],
        }
        super().__init__(*args, **kwargs)
        self.set_output_format('html')
    def build_parser(self) -> markdown.Markdown:
        # Build the parser using selected default features from py-markdown.
        # The complete list of all available processors can be found in the
        # super().build_parser() function.
        #
        # Note: for any py-markdown updates, manually check if we want any
        # of the new features added upstream or not; they wouldn't get
        # included by default.
        self.preprocessors = self.build_preprocessors()
        self.parser = self.build_block_parser()
        self.inlinePatterns = self.build_inlinepatterns()
        self.treeprocessors = self.build_treeprocessors()
        self.postprocessors = self.build_postprocessors()
        self.handle_zephyr_mirror()
        return self
    def build_preprocessors(self) -> markdown.util.Registry:
        """Registry of preprocessors; higher priority runs first."""
        # We disable the following preprocessors from upstream:
        #
        #   html_block - insecure
        #   reference - references don't make sense in a chat context.
        preprocessors = markdown.util.Registry()
        preprocessors.register(BugdownListPreprocessor(self), 'hanging_lists', 35)
        preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30)
        preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25)
        preprocessors.register(AlertWordNotificationProcessor(self), 'custom_text_notifications', 20)
        return preprocessors
    def build_block_parser(self) -> markdown.util.Registry:
        """Block parser with Zulip's replacements for lists/quotes/headers."""
        # We disable the following blockparsers from upstream:
        #
        #   indent - replaced by ours
        #   setextheader - disabled; we only support hashheaders for headings
        #   olist - replaced by ours
        #   ulist - replaced by ours
        #   quote - replaced by ours
        parser = markdown.blockprocessors.BlockParser(self)
        parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 95)
        parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90)
        # The email gateway disables code blocks entirely (see config).
        if not self.getConfig('code_block_processor_disabled'):
            parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 85)
        parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 80)
        # We get priority 75 from 'table' extension
        parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70)
        parser.blockprocessors.register(OListProcessor(parser), 'olist', 65)
        parser.blockprocessors.register(UListProcessor(parser), 'ulist', 60)
        parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55)
        parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50)
        return parser
    def build_inlinepatterns(self) -> markdown.util.Registry:
        """Inline pattern registry: upstream patterns we keep plus Zulip's
        mentions, stream links, emoji, tex, avatars, and realm filters."""
        # We disable the following upstream inline patterns:
        #
        #    backtick - replaced by ours
        #    escape - probably will re-add at some point.
        #    link - replaced by ours
        #    image_link - replaced by ours
        #    autolink - replaced by ours
        #    automail - replaced by ours
        #    linebreak - we use nl2br and consider that good enough
        #    html - insecure
        #    reference - references not useful
        #    image_reference - references not useful
        #    short_reference - references not useful
        # ---------------------------------------------------
        #    strong_em - for these three patterns,
        #    strong2 - we have our own versions where
        #    emphasis2 - we disable _ for bold and emphasis
        # Declare regexes for clean single line calls to .register().
        NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
        # Custom strikethrough syntax: ~~foo~~
        DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
        # Custom bold syntax: **foo** but not __foo__
        # str inside ** must start and end with a word character
        # it need for things like "const char *x = (char *)y"
        EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
        ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
        STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
        # Add Inline Patterns.  We use a custom numbering of the
        # rules, that preserves the order from upstream but leaves
        # space for us to add our own.
        reg = markdown.util.Registry()
        reg.register(BacktickInlineProcessor(markdown.inlinepatterns.BACKTICK_RE), 'backtick', 105)
        reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
        reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
        reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
        reg.register(StreamTopicPattern(get_compiled_stream_topic_link_regex(), self), 'topic', 87)
        reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
        reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
        reg.register(Timestamp(r'!time\((?P<time>[^)]*)\)'), 'timestamp', 75)
        # Note that !gravatar syntax should be deprecated long term.
        reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
        reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
        reg.register(LinkInlineProcessor(markdown.inlinepatterns.LINK_RE, self), 'link', 60)
        reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
        # Reserve priority 45-54 for Realm Filters
        reg = self.register_realm_filters(reg)
        reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
        reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
        reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
        reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
        # We get priority 5 from 'nl2br' extension
        reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
        return reg
    def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
        """Register one RealmFilterPattern per configured realm filter."""
        for (pattern, format_string, id) in self.getConfig("realm_filters"):
            inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
                                    f'realm_filters/{pattern}', 45)
        return inlinePatterns
    def build_treeprocessors(self) -> markdown.util.Registry:
        # Here we build all the processors from upstream, plus a few of our own.
        treeprocessors = markdown.util.Registry()
        # We get priority 30 from 'hilite' extension
        treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
        treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
        treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
        if settings.CAMO_URI:
            treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
        return treeprocessors
    def build_postprocessors(self) -> markdown.util.Registry:
        # These are the default python-markdown processors, unmodified.
        postprocessors = markdown.util.Registry()
        postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
        postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
        postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
        return postprocessors
    def getConfig(self, key: str, default: str='') -> Any:
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default
    def handle_zephyr_mirror(self) -> None:
        """Strip the engine down to a minimal processor set when it is the
        special Zephyr-mirror engine (realm == ZEPHYR_MIRROR_BUGDOWN_KEY)."""
        if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
            # Disable almost all inline patterns for zephyr mirror
            # users' traffic that is mirrored.  Note that
            # inline_interesting_links is a treeprocessor and thus is
            # not removed
            self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
            self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links',
                                                                         'rewrite_to_https'])
            # insert new 'inline' processor because we have changed self.inlinePatterns
            # but InlineProcessor copies md as self.md in __init__.
            self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
            self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
            self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])
# Cache of built engines, keyed by (realm_filters_key, email_gateway).
md_engines: Dict[Tuple[int, bool], markdown.Markdown] = {}
# Last-seen realm filter tuples per key, used to detect stale engines.
realm_filter_data: Dict[int, List[Tuple[str, str, int]]] = {}
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
    """(Re)build and cache the engine for this (realm, email_gateway) pair.

    Requires realm_filter_data[realm_filters_key] to already be populated.
    """
    engine_key = (realm_filters_key, email_gateway)
    # Drop any stale cached engine before rebuilding.
    md_engines.pop(engine_key, None)
    md_engines[engine_key] = build_engine(
        realm_filters=realm_filter_data[realm_filters_key],
        realm_filters_key=realm_filters_key,
        email_gateway=email_gateway,
    )
def build_engine(realm_filters: List[Tuple[str, str, int]],
                 realm_filters_key: int,
                 email_gateway: bool) -> markdown.Markdown:
    """Construct a Bugdown engine with Zulip's standard extensions."""
    return Bugdown(
        realm_filters=realm_filters,
        realm=realm_filters_key,
        # Code blocks are disabled for content arriving via the email gateway.
        code_block_processor_disabled=email_gateway,
        extensions=[
            nl2br.makeExtension(),
            tables.makeExtension(),
            codehilite.makeExtension(
                linenums=False,
                guess_lang=False,
            ),
        ])
# Split the topic name into multiple sections so that we can easily use
# our common single link matching regex on it.
# (These are the characters that terminate a bare URL inside a topic.)
basic_link_splitter = re.compile(r'[ !;\?\),\'\"]')
# Security note: We don't do any HTML escaping in this
# function on the URLs; they are expected to be HTML-escaped when
# rendered by clients (just as links rendered into message bodies
# are validated and escaped inside `url_to_a`).
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
    """Return the URLs implied by a message topic: realm-filter expansions
    first, then any raw links found in the topic text (bare hosts get an
    https:// scheme added)."""
    urls: List[str] = []

    # Expand realm filters: each is a (pattern, url_format, id) tuple.
    for realm_filter in realm_filters_for_realm(realm_filters_key):
        regex = prepare_realm_pattern(realm_filter[0])
        urls.extend(realm_filter[1] % m.groupdict()
                    for m in re.finditer(regex, topic_name))

    # Also make raw urls navigable.
    for fragment in basic_link_splitter.split(topic_name):
        link_match = re.match(get_web_link_regex(), fragment)
        if not link_match:
            continue
        url = link_match.group('url')
        url_object = parse(url)
        if not url_object.scheme:
            url = url_object.replace(scheme='https').to_text()
        urls.append(url)
    return urls
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
    """Ensure the engine/filter caches are current for the given key.

    If realm_filters_key is None, load the filters for every realm (plus
    the default and Zephyr-mirror special keys) and rebuild all engines.
    """
    global realm_filter_data

    if realm_filters_key is None:
        all_filters = all_realm_filters()
        all_filters[DEFAULT_BUGDOWN_KEY] = []
        for filters_key, filters in all_filters.items():
            realm_filter_data[filters_key] = filters
            make_md_engine(filters_key, email_gateway)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
        make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
        return

    realm_filters = realm_filters_for_realm(realm_filters_key)
    if realm_filter_data.get(realm_filters_key) != realm_filters:
        # Realm filters data has changed, update `realm_filter_data` and any
        # of the existing markdown engines using this set of realm filters.
        realm_filter_data[realm_filters_key] = realm_filters
        for email_gateway_flag in (True, False):
            if (realm_filters_key, email_gateway_flag) in md_engines:
                # Update only existing engines(if any), don't create new one.
                make_md_engine(realm_filters_key, email_gateway_flag)

    if (realm_filters_key, email_gateway) not in md_engines:
        # Markdown engine corresponding to this key doesn't exists so create one.
        make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: str) -> str:
return repr(_privacy_re.sub('x', content))
def log_bugdown_error(msg: str) -> None:
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminNotifyHandler from sending the sanitized
    original markdown formatting into another Zulip message, which
    could cause an infinite exception loop."""
    # bugdown_logger is the module-level logger configured elsewhere in
    # this file; callers should pre-sanitize msg (see privacy_clean_markdown).
    bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
    """Map normalized (stripped, lowercased) email -> {'id', 'email'} row
    for users in the realm matching any of ``emails`` case-insensitively."""
    if not emails:
        return {}

    email_filters = {
        Q(email__iexact=email.strip().lower())
        for email in emails
    }
    rows = UserProfile.objects.filter(
        realm_id=realm_id,
    ).filter(
        # OR together the per-email filters into a single query.
        functools.reduce(lambda a, b: a | b, email_filters),
    ).values(
        'id',
        'email',
    )
    return {row['email'].strip().lower(): row for row in rows}
def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
    """Fetch id/full_name/email rows for active users in the realm whose
    full name matches one of the mention texts (case-insensitively)."""
    if not mention_texts:
        return []

    # Remove the trailing part of the `name|id` mention syntax,
    # thus storing only full names in full_names.
    name_re = r'(?P<full_name>.+)\|\d+$'
    full_names = set()
    for mention_text in mention_texts:
        suffix_match = re.match(name_re, mention_text)
        if suffix_match:
            full_names.add(suffix_match.group("full_name"))
        else:
            full_names.add(mention_text)

    name_filters = {
        Q(full_name__iexact=full_name)
        for full_name in full_names
    }
    rows = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).filter(
        # OR together the per-name filters into a single query.
        functools.reduce(lambda a, b: a | b, name_filters),
    ).values(
        'id',
        'full_name',
        'email',
    )
    return list(rows)
class MentionData:
    """Pre-fetched lookup tables for the @-mentions that *might* occur in a
    message, built with bulk queries so rendering need not hit the database
    per mention."""
    def __init__(self, realm_id: int, content: str) -> None:
        # Scan the raw content for candidate mentions, then bulk-fetch the
        # matching users and groups up front.
        mention_texts, has_wildcards = possible_mentions(content)
        possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
        # Lowercased full name -> user row (ambiguous for duplicate names).
        self.full_name_info = {
            row['full_name'].lower(): row
            for row in possible_mentions_info
        }
        # User id -> user row.
        self.user_id_info = {
            row['id']: row
            for row in possible_mentions_info
        }
        self.init_user_group_data(realm_id=realm_id, content=content)
        self.has_wildcards = has_wildcards
    def message_has_wildcards(self) -> bool:
        # True when the content contained a wildcard mention (per possible_mentions).
        return self.has_wildcards
    def init_user_group_data(self,
                             realm_id: int,
                             content: str) -> None:
        """Populate user-group name and membership lookup tables for any
        group mentions that might appear in content."""
        user_group_names = possible_user_group_mentions(content)
        self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
        self.user_group_members: Dict[int, List[int]] = defaultdict(list)
        group_ids = [group.id for group in self.user_group_name_info.values()]
        if not group_ids:
            # Early-return to avoid the cost of hitting the ORM,
            # which shows up in profiles.
            return
        membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
        for info in membership.values('user_group_id', 'user_profile_id'):
            group_id = info['user_group_id']
            user_profile_id = info['user_profile_id']
            self.user_group_members[group_id].append(user_profile_id)
    def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
        # warning: get_user_by_name is not dependable if two
        # users of the same full name are mentioned. Use
        # get_user_by_id where possible.
        return self.full_name_info.get(name.lower(), None)
    def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
        # Accepts a string id (as captured from mention syntax) and coerces it.
        return self.user_id_info.get(int(id), None)
    def get_user_ids(self) -> Set[int]:
        """
        Returns the user IDs that might have been mentioned by this
        content.  Note that because this data structure has not parsed
        the message and does not know about escaping/code blocks, this
        will overestimate the list of user ids.
        """
        return set(self.user_id_info.keys())
    def get_user_group(self, name: str) -> Optional[UserGroup]:
        # Case-insensitive lookup; returns None for unknown groups.
        return self.user_group_name_info.get(name.lower(), None)
    def get_group_members(self, user_group_id: int) -> List[int]:
        # Empty list for groups whose membership was not pre-fetched.
        return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
    """Map lowercased group name -> UserGroup for the named groups in the realm."""
    if not user_group_names:
        return {}
    groups = UserGroup.objects.filter(realm_id=realm_id,
                                      name__in=user_group_names)
    return {group.name.lower(): group for group in groups}
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
    """Map stream name -> {'id', 'name'} row for active streams in the realm
    matching any of ``stream_names`` exactly."""
    if not stream_names:
        return {}

    name_filters = {
        Q(name=name)
        for name in stream_names
    }
    rows = get_active_streams(
        realm=realm,
    ).filter(
        # OR together the per-name filters into a single query.
        functools.reduce(lambda a, b: a | b, name_filters),
    ).values(
        'id',
        'name',
    )
    return {row['name']: row for row in rows}
def do_convert(content: str,
               realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
               message: Optional[Message]=None,
               message_realm: Optional[Realm]=None,
               sent_by_bot: bool=False,
               translate_emoticons: bool=False,
               mention_data: Optional[MentionData]=None,
               email_gateway: bool=False,
               no_previews: bool=False) -> str:
    """Convert Markdown to HTML, with Zulip-specific settings and hacks.

    The selected engine depends on the message's realm (falling back to
    DEFAULT_BUGDOWN_KEY) and on email_gateway; realm data needed by the
    Zulip-specific processors is attached to the engine as zulip_db_data.

    Raises BugdownRenderingException if rendering fails or the rendered
    output exceeds MAX_MESSAGE_LENGTH * 10 characters.
    """
    # This logic is a bit convoluted, but the overall goal is to support a range of use cases:
    # * Nothing is passed in other than content -> just run default options (e.g. for docs)
    # * message is passed, but no realm is -> look up realm from message
    # * message_realm is passed -> use that realm for bugdown purposes
    if message is not None:
        if message_realm is None:
            message_realm = message.get_realm()
    if message_realm is None:
        realm_filters_key = DEFAULT_BUGDOWN_KEY
    else:
        realm_filters_key = message_realm.id

    # Used only for log messages; never log the content itself.
    if message and hasattr(message, 'id') and message.id:
        logging_message_id = 'id# ' + str(message.id)
    else:
        logging_message_id = 'unknown'

    if message is not None and message_realm is not None:
        if message_realm.is_zephyr_mirror_realm:
            if message.sending_client.name == "zephyr_mirror":
                # Use slightly customized Markdown processor for content
                # delivered via zephyr_mirror
                realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY

    maybe_update_markdown_engines(realm_filters_key, email_gateway)
    md_engine_key = (realm_filters_key, email_gateway)
    if md_engine_key in md_engines:
        _md_engine = md_engines[md_engine_key]
    else:
        # Fall back to the default engine, building it first if needed.
        if DEFAULT_BUGDOWN_KEY not in md_engines:
            maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
        _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    # Filters such as UserMentionPattern need a message.
    _md_engine.zulip_message = message
    _md_engine.zulip_realm = message_realm
    _md_engine.zulip_db_data = None  # for now
    _md_engine.image_preview_enabled = image_preview_enabled(
        message, message_realm, no_previews)
    _md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
        message, message_realm, no_previews)

    # Pre-fetch data from the DB that is used in the bugdown thread
    if message_realm is not None:
        # Here we fetch the data structures needed to render
        # mentions/avatars/stream mentions from the database, but only
        # if there is syntax in the message that might use them, since
        # the fetches are somewhat expensive and these types of syntax
        # are uncommon enough that it's a useful optimization.

        if mention_data is None:
            mention_data = MentionData(message_realm.id, content)

        emails = possible_avatar_emails(content)
        email_info = get_email_info(message_realm.id, emails)

        stream_names = possible_linked_stream_names(content)
        stream_name_info = get_stream_name_info(message_realm, stream_names)

        if content_has_emoji_syntax(content):
            active_realm_emoji = message_realm.get_active_emoji()
        else:
            active_realm_emoji = dict()

        _md_engine.zulip_db_data = {
            'realm_alert_words_automaton': realm_alert_words_automaton,
            'email_info': email_info,
            'mention_data': mention_data,
            'active_realm_emoji': active_realm_emoji,
            'realm_uri': message_realm.uri,
            'sent_by_bot': sent_by_bot,
            'stream_names': stream_name_info,
            'translate_emoticons': translate_emoticons,
        }

    try:
        # Spend at most 5 seconds rendering; this protects the backend
        # from being overloaded by bugs (e.g. markdown logic that is
        # extremely inefficient in corner cases) as well as user
        # errors (e.g. a realm filter that makes some syntax
        # infinite-loop).
        rendered_content = timeout(5, _md_engine.convert, content)

        # Throw an exception if the content is huge; this protects the
        # rest of the codebase from any bugs where we end up rendering
        # something huge.
        if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
            raise BugdownRenderingException(
                f'Rendered content exceeds {MAX_MESSAGE_LENGTH * 10} characters (message {logging_message_id})'
            )
        return rendered_content
    except Exception:
        cleaned = privacy_clean_markdown(content)
        # NOTE: Don't change this message without also changing the
        # logic in logging_handlers.py or we can create recursive
        # exceptions.
        bugdown_logger.exception(
            'Exception in Markdown parser; input (sanitized) was: %s\n (message %s)',
            cleaned,
            logging_message_id,
        )
        raise BugdownRenderingException()
    finally:
        # These next three lines are slightly paranoid, since
        # we always set these right before actually using the
        # engine, but better safe then sorry.
        _md_engine.zulip_message = None
        _md_engine.zulip_realm = None
        _md_engine.zulip_db_data = None
# Module-level accumulators for Markdown rendering statistics.
bugdown_time_start = 0.0    # wall-clock time of the most recent render start
bugdown_total_time = 0.0    # total seconds spent rendering so far
bugdown_total_requests = 0  # number of renders completed so far

def get_bugdown_time() -> float:
    """Return total wall-clock seconds spent rendering Markdown."""
    return bugdown_total_time

def get_bugdown_requests() -> int:
    """Return the number of completed Markdown renders."""
    return bugdown_total_requests

def bugdown_stats_start() -> None:
    """Record a render's start time; pair with bugdown_stats_finish()."""
    global bugdown_time_start
    bugdown_time_start = time.time()

def bugdown_stats_finish() -> None:
    """Accumulate the elapsed time since the last start and bump the counter."""
    global bugdown_total_time
    global bugdown_total_requests
    elapsed = time.time() - bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += elapsed
def convert(content: str,
            realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
            message: Optional[Message]=None,
            message_realm: Optional[Realm]=None,
            sent_by_bot: bool=False,
            translate_emoticons: bool=False,
            mention_data: Optional[MentionData]=None,
            email_gateway: bool=False,
            no_previews: bool=False) -> str:
    """Stats-tracking wrapper around do_convert(); see do_convert for details."""
    bugdown_stats_start()
    rendered = do_convert(content,
                          realm_alert_words_automaton=realm_alert_words_automaton,
                          message=message,
                          message_realm=message_realm,
                          sent_by_bot=sent_by_bot,
                          translate_emoticons=translate_emoticons,
                          mention_data=mention_data,
                          email_gateway=email_gateway,
                          no_previews=no_previews)
    bugdown_stats_finish()
    return rendered
| {
"content_hash": "c6c3948c8e16a8e9dd21e56db9c854c6",
"timestamp": "",
"source": "github",
"line_count": 2423,
"max_line_length": 160,
"avg_line_length": 41.803549319026004,
"alnum_prop": 0.5907592062395103,
"repo_name": "timabbott/zulip",
"id": "9654abe8514b5cc55d9de656821105e617dfcc9f",
"size": "101416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/bugdown/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "429356"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "844217"
},
{
"name": "JavaScript",
"bytes": "3259448"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "74427"
},
{
"name": "Python",
"bytes": "7825440"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "123706"
},
{
"name": "TSQL",
"bytes": "314"
},
{
"name": "TypeScript",
"bytes": "22102"
}
],
"symlink_target": ""
} |
import re
import json
from pygics import Burst
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from archon.settings import SESSION_COOKIE_AGE
from archon.view import *
# When True, pageview() returns the undecorated debug handler (no login
# required, exceptions propagate) instead of the production handler.
ARCHON_DEBUG = False
class ManagerAbstraction:
    """Base class giving each manager a lazily-created shared instance.

    Subclasses obtain the shared instance via ``Subclass.instance(...)``;
    constructor arguments are only used on the first call.
    """

    # Cached singleton instance, created on first instance() call.
    # NOTE(review): this class attribute is inherited, so if a parent
    # class's instance is created first, a subclass reading the inherited
    # attribute would see the parent's instance -- confirm manager classes
    # never share an instantiated ancestor at runtime.
    __MANAGER__ = None

    @classmethod
    def instance(cls, *argv, **kargs):
        """Return the shared instance, creating it on first use."""
        if cls.__MANAGER__ is None:  # 'is None', not '== None' (PEP 8)
            cls.__MANAGER__ = cls(*argv, **kargs)
        return cls.__MANAGER__

    def getSummary(self, r, m, v):
        """Default dashboard summary; concrete managers should override."""
        return {
            'name' : '?',
            'icon' : 'Default.png',
            'desc' : 'This is Unknown Manager',
            'link' : '/dashboard',
            'view' : DIV()
        }
class ArchonReq:
    """Bundle of parsed request data handed to archon view functions."""

    def __init__(self, request, method, path, query, data):
        self.Request = request  # underlying Django HttpRequest
        self.Method = method    # HTTP verb string
        self.Path = path        # non-empty path segments
        self.Query = query      # query/form parameters
        self.Data = data        # parsed body payload (JSON dict or FILES)

    def __str__(self):
        return f'{self.Method}:{self.Path}\nQuery:{self.Query}\nData:{self.Data}'
class ArchonView:
    """Holds the menu/page DOM fragments for a response and doubles as a
    locale-aware translator via ``__call__``."""

    class PageContent(TAG):
        """<div class="pagecontent"> wrapper for the page body."""
        def __init__(self):
            TAG.__init__(self, 'div', CLASS='pagecontent')

    def __init__(self, app, lang):
        self.Menu = DIV()
        self.Page = ArchonView.PageContent()
        self._app = app    # application name used to pick its locale table
        self._lang = lang  # preferred languages, most-preferred first

    def __call__(self, key):
        """Translate key: app-specific locale first, then GLOBAL, else
        echo the key unchanged."""
        glb_locale = archon_locales['GLOBAL']
        if self._app in archon_locales:
            app_locale = archon_locales[self._app]
            if key in app_locale:
                translations = app_locale[key]
                for lang in self._lang:
                    if lang in translations:
                        return translations[lang]
        if key in glb_locale:
            translations = glb_locale[key]
            for lang in self._lang:
                if lang in translations:
                    return translations[lang]
        return key

    def __render__(self):
        return {'menu' : self.Menu, 'page' : self.Page}

    @classmethod
    def __error__(cls, title, msg):
        return {'menu' : DIV(), 'page' : ALERT(title, msg, CLASS='alert-danger')}
def pageview(manager_class, **async_path):
    """Decorator factory wrapping an archon view function into a Django view.

    manager_class: a ManagerAbstraction subclass; its singleton is passed
        to the view as ``m``.
    async_path: maps a path segment name to an alternate handler invoked
        (and returned as JSON) when that segment appears in the URL path.

    Returns the production handler (login required, errors reported as
    JSON alerts) or, when ARCHON_DEBUG is set, a debug handler with no
    login requirement where exceptions propagate.
    """
    def wrapper(view):
        def _parse_common(request):
            # Wrap filter() results in lists: py3 filter() returns one-shot
            # iterators, which broke `in path` membership tests and the
            # repeated iteration of lang inside ArchonView.__call__.
            method = request.method
            path = list(filter(None, request.path.split('/')))
            # .get() so requests without an Accept-Language header don't 500.
            accept_language = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
            lang = list(filter(None, re.split(r';|,|q=0.\d', accept_language)))
            app = view.__module__.split('.')[1]
            return method, path, lang, app

        def _parse_payload(request, method):
            # Returns (query, data) for the request; may raise on bad JSON.
            # NOTE(review): request.PUT is not a standard Django attribute;
            # presumably provided by middleware -- confirm.
            if method == 'GET':
                return dict(request.GET), {}
            if method == 'POST':
                query = dict(request.POST)
            elif method == 'PUT':
                query = dict(request.PUT)
            else:
                return {}, {}
            # Multipart uploads arrive via FILES; otherwise the body is JSON.
            if not hasattr(request, '_body') and request._read_started:
                data = request.FILES
            else:
                data = json.loads(request.body)
            return query, data

        @login_required
        def decofunc(request):
            request.session.set_expiry(SESSION_COOKIE_AGE)
            method, path, lang, app = _parse_common(request)
            v = ArchonView(app, lang)
            try:
                m = manager_class.instance()
            except Exception as e:
                return JsonResponse(ArchonView.__error__(v('manager allocation error'), str(e)))
            try:
                query, data = _parse_payload(request, method)
            except Exception as e:
                return JsonResponse(ArchonView.__error__(v('request error'), str(e)))
            r = ArchonReq(request, method, path, query, data)
            for async_path_name in async_path:
                if async_path_name in path:
                    try:
                        return JsonResponse(async_path[async_path_name](r, m, v))
                    except Exception as e:
                        return JsonResponse(ArchonView.__error__(v('application error'), str(e)))
            try:
                view(r, m, v)
            except Exception as e:
                return JsonResponse(ArchonView.__error__(v('application error'), str(e)))
            return JsonResponse(v.__render__())

        def decofunc_debug(request):
            # Same flow as decofunc, but without login/session handling and
            # with exceptions propagating for easier debugging.
            method, path, lang, app = _parse_common(request)
            v = ArchonView(app, lang)
            m = manager_class.instance()
            query, data = _parse_payload(request, method)
            r = ArchonReq(request, method, path, query, data)
            for async_path_name in async_path:
                if async_path_name in path:
                    return JsonResponse(async_path[async_path_name](r, m, v))
            view(r, m, v)
            return JsonResponse(v.__render__())

        return decofunc_debug if ARCHON_DEBUG else decofunc
    return wrapper
def modelview(model):
    """Expose *model* in the Django admin using the stock ModelAdmin."""
    admin.site.register(model, admin.ModelAdmin)
| {
"content_hash": "6326b5e145049501924416ff8ffab3b5",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 116,
"avg_line_length": 36.49122807017544,
"alnum_prop": 0.49198717948717946,
"repo_name": "HyechurnJang/archon",
"id": "f061361346e5c53e6f9bfc725e3bc9a264fe2453",
"size": "9053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "archon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "250717"
},
{
"name": "HTML",
"bytes": "1529894"
},
{
"name": "JavaScript",
"bytes": "3797796"
},
{
"name": "Python",
"bytes": "356568"
},
{
"name": "Shell",
"bytes": "1020"
}
],
"symlink_target": ""
} |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Mapping, OrderedDict
import copy
import warnings
from django.core.exceptions import FieldError
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models.constants import LOOKUP_SEP
from django.db.models.aggregates import refs_aggregate
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import Q
from django.db.models.related import PathInfo
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
ORDER_PATTERN, JoinInfo, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin, Col
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR, EmptyWhere)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
    """A single raw SQL query."""

    def __init__(self, sql, using, params=None):
        self.sql = sql
        self.using = using
        self.params = params or ()
        self.cursor = None
        # Mirror a handful of regular-Query attributes so that the SQL
        # compiler machinery can process results from this object too.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.extra_select = {}
        self.aggregate_select = {}

    def clone(self, using):
        """Return a copy of this query bound to the *using* connection alias."""
        return RawQuery(self.sql, using, params=self.params)

    def get_columns(self):
        """Return result-set column names, executing the query if needed."""
        if self.cursor is None:
            self._execute_query()
        convert = connections[self.using].introspection.column_name_converter
        return [convert(meta[0]) for meta in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if connections[self.using].features.can_use_chunked_reads:
            rows = self.cursor
        else:
            # The backend cannot stream rows, so evaluate the entire
            # query up front.
            rows = list(self.cursor)
        return iter(rows)

    def __repr__(self):
        return "<RawQuery: %s>" % self

    def __str__(self):
        # Dict params use named placeholders; sequences use positional ones.
        params_type = dict if isinstance(self.params, Mapping) else tuple
        return self.sql % params_type(self.params)

    def _execute_query(self):
        cursor = connections[self.using].cursor()
        cursor.execute(self.sql, self.params)
        self.cursor = cursor
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
    def __init__(self, model, where=WhereNode):
        """
        Initialize empty query state for *model*. 'where' is the node class
        used to build both the WHERE and HAVING trees.
        """
        self.model = model
        # Maps alias -> number of live references to it.
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # type they are. The key is the alias of the joined table (possibly
        # the table name) and the value is JoinInfo from constants.py.
        self.alias_map = {}
        self.table_map = {} # Maps table names to list of aliases.
        self.join_map = {}
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.included_inherited_models = {}
        # SQL-related attributes
        # Select and related select clauses as SelectInfo instances.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), annotate(),
        # subqueries...)
        self.select = []
        # The related_select_cols is used for columns needed for
        # select_related - this is populated in the compile stage.
        self.related_select_cols = []
        self.tables = [] # Aliases in the order they are created.
        self.where = where()
        self.where_class = where
        self.group_by = None
        self.having = where()
        self.order_by = []
        self.low_mark, self.high_mark = 0, None # Used for offset/limit
        self.distinct = False
        self.distinct_fields = []
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_related = False
        # SQL aggregate-related attributes
        # The _aggregates will be an OrderedDict when used. Due to the cost
        # of creating OrderedDict this attribute is created lazily (in
        # self.aggregates property).
        self._aggregates = None # Maps alias -> SQL aggregate function
        self.aggregate_select_mask = None
        self._aggregate_select_cache = None
        # Arbitrary maximum limit for select_related. Prevents infinite
        # recursion. Can be changed by the depth parameter to select_related().
        self.max_depth = 5
        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        # The _extra attribute is an OrderedDict, lazily created similarly to
        # .aggregates
        self._extra = None # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None
        self.extra_tables = ()
        self.extra_order_by = ()
        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (set(), True)
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def aggregates(self):
if self._aggregates is None:
self._aggregates = OrderedDict()
return self._aggregates
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
    def prepare(self):
        # Hook for subclasses to return a query prepared for execution;
        # the base implementation is a no-op.
        return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
    def clone(self, klass=None, memo=None, **kwargs):
        """
        Creates a copy of the current instance. The 'kwargs' parameter can be
        used by clients to update attributes after copying has taken place.
        """
        # Build the copy as a bare Empty instance and swap in the target
        # class, so the target's __init__ is never run.
        # NOTE: 'memo' is accepted for __deepcopy__ compatibility but is not
        # consulted here -- the copies below are shallow per attribute.
        obj = Empty()
        obj.__class__ = klass or self.__class__
        obj.model = self.model
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.table_map = self.table_map.copy()
        obj.join_map = self.join_map.copy()
        obj.default_cols = self.default_cols
        obj.default_ordering = self.default_ordering
        obj.standard_ordering = self.standard_ordering
        obj.included_inherited_models = self.included_inherited_models.copy()
        obj.select = self.select[:]
        obj.related_select_cols = []
        obj.tables = self.tables[:]
        obj.where = self.where.clone()
        obj.where_class = self.where_class
        if self.group_by is None:
            obj.group_by = None
        else:
            obj.group_by = self.group_by[:]
        obj.having = self.having.clone()
        obj.order_by = self.order_by[:]
        obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
        obj.distinct = self.distinct
        obj.distinct_fields = self.distinct_fields[:]
        obj.select_for_update = self.select_for_update
        obj.select_for_update_nowait = self.select_for_update_nowait
        obj.select_related = self.select_related
        obj.related_select_cols = []
        obj._aggregates = self._aggregates.copy() if self._aggregates is not None else None
        if self.aggregate_select_mask is None:
            obj.aggregate_select_mask = None
        else:
            obj.aggregate_select_mask = self.aggregate_select_mask.copy()
        # _aggregate_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both aggregates and
        # _aggregate_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._aggregate_select_cache = None
        obj.max_depth = self.max_depth
        obj._extra = self._extra.copy() if self._extra is not None else None
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        obj.extra_tables = self.extra_tables
        obj.extra_order_by = self.extra_order_by
        obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
        # Sticky filters keep the alias-reuse bookkeeping across clones.
        if self.filter_is_sticky and self.used_aliases:
            obj.used_aliases = self.used_aliases.copy()
        else:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        # Only copy instance-level overrides of the class-level defaults.
        if 'alias_prefix' in self.__dict__:
            obj.alias_prefix = self.alias_prefix
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.__dict__.update(kwargs)
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
backend_converters = connection.ops.get_db_converters(aggregate.field.get_internal_type())
field_converters = aggregate.field.get_db_converters(connection)
for converter in backend_converters:
value = converter(value, aggregate.field)
for converter in field_converters:
value = converter(value, connection)
return value
    def get_aggregation(self, using, force_subq=False):
        """
        Returns the dictionary with the values of the existing aggregations,
        mapping aggregate alias -> resolved value, executed on the 'using'
        connection alias.
        """
        if not self.aggregate_select:
            return {}
        # If there is a group by clause, aggregating does not add useful
        # information but retrieves only the first row. Aggregate
        # over the subquery instead.
        if self.group_by is not None or force_subq:
            from django.db.models.sql.subqueries import AggregateQuery
            query = AggregateQuery(self.model)
            obj = self.clone()
            if not force_subq:
                # In forced subq case the ordering and limits will likely
                # affect the results.
                obj.clear_ordering(True)
                obj.clear_limits()
            obj.select_for_update = False
            obj.select_related = False
            obj.related_select_cols = []
            # Inside the outer query the subquery is referenced by a fixed
            # alias, so relabel every inner table reference to it.
            relabels = dict((t, 'subquery') for t in self.tables)
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            for alias, aggregate in self.aggregate_select.items():
                if aggregate.is_summary:
                    query.aggregates[alias] = aggregate.relabeled_clone(relabels)
                    del obj.aggregate_select[alias]
            try:
                query.add_subquery(obj, using)
            except EmptyResultSet:
                # The inner query can match nothing: every aggregate is None.
                return dict(
                    (alias, None)
                    for alias in query.aggregate_select
                )
        else:
            # Aggregate directly over this query; strip the select clause
            # down so only the aggregates are computed.
            query = self
            self.select = []
            self.default_cols = False
            self._extra = {}
            self.remove_inherited_models()
        query.clear_ordering(True)
        query.clear_limits()
        query.select_for_update = False
        query.select_related = False
        query.related_select_cols = []
        result = query.get_compiler(using).execute_sql(SINGLE)
        if result is None:
            # No row at all: treat every aggregate as NULL.
            result = [None for q in query.aggregate_select.items()]
        return dict(
            (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
            for (alias, aggregate), val
            in zip(query.aggregate_select.items(), result)
        )
    def get_count(self, using):
        """
        Performs a COUNT() query using the current filter constraints and
        returns the number of rows, adjusted for any existing slicing.
        """
        obj = self.clone()
        if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
            # If a select clause exists, then the query has already started to
            # specify the columns that are to be returned.
            # In this case, we need to use a subquery to evaluate the count.
            from django.db.models.sql.subqueries import AggregateQuery
            subquery = obj
            subquery.clear_ordering(True)
            subquery.clear_limits()
            obj = AggregateQuery(obj.model)
            try:
                obj.add_subquery(subquery, using=using)
            except EmptyResultSet:
                # add_subquery evaluates the query, if it's an EmptyResultSet
                # then there are can be no results, and therefore there the
                # count is obviously 0
                return 0
        obj.add_count_column()
        # add_count_column() registers the COUNT under the None alias.
        number = obj.get_aggregation(using=using)[None]
        # Apply offset and limit constraints manually, since using LIMIT/OFFSET
        # in SQL (in variants that provide them) doesn't change the COUNT
        # output.
        number = max(0, number - self.low_mark)
        if self.high_mark is not None:
            number = min(number, self.high_mark - self.low_mark)
        return number
    def has_filters(self):
        # Truthy when the query carries any WHERE or HAVING constraints.
        return self.where or self.having
def has_results(self, using):
q = self.clone()
if not q.distinct:
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.
        The 'connector' parameter describes how to connect filters from the
        'rhs' query.
        """
        assert self.model == rhs.model, \
            "Cannot combine queries on two different base models."
        assert self.can_filter(), \
            "Cannot combine queries once a slice has been taken."
        assert self.distinct == rhs.distinct, \
            "Cannot combine a unique query with a non-unique query."
        assert self.distinct_fields == rhs.distinct_fields, \
            "Cannot combine queries with different distinct fields."
        self.remove_inherited_models()
        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        conjunction = (connector == AND)
        # Determine which existing joins can be reused. When combining the
        # query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in AND
        # case a single row can't fulfill a condition like:
        #     revrel__col=1 & revrel__col=2
        # But, there might be two different related rows matching this
        # condition. In OR case a single True is enough, so single row is
        # enough, too.
        #
        # Note that we will be creating duplicate joins for non-m2m joins in
        # the AND case. The results will be correct but this creates too many
        # joins. This is something that could be fixed later on.
        reuse = set() if conjunction else set(self.tables)
        # Base table must be present in the query - this is the same
        # table on both sides.
        self.get_initial_alias()
        joinpromoter = JoinPromoter(connector, 2, False)
        joinpromoter.add_votes(
            j for j in self.alias_map if self.alias_map[j].join_type == self.INNER)
        rhs_votes = set()
        # Now, add the joins from rhs query into the new query (skipping base
        # table).
        for alias in rhs.tables[1:]:
            table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
            # If the left side of the join was already relabeled, use the
            # updated alias.
            lhs = change_map.get(lhs, lhs)
            new_alias = self.join(
                (lhs, table, join_cols), reuse=reuse,
                nullable=nullable, join_field=join_field)
            if join_type == self.INNER:
                rhs_votes.add(new_alias)
            # We can't reuse the same join again in the query. If we have two
            # distinct joins for the same connection in rhs query, then the
            # combined query must have two joins, too.
            reuse.discard(new_alias)
            change_map[alias] = new_alias
            if not rhs.alias_refcount[alias]:
                # The alias was unused in the rhs query. Unref it so that it
                # will be unused in the new query, too. We have to add and
                # unref the alias so that join promotion has information of
                # the join type for the unused alias.
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)
        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        if rhs.where:
            w = rhs.where.clone()
            w.relabel_aliases(change_map)
            if not self.where:
                # Since 'self' matches everything, add an explicit "include
                # everything" where-constraint so that connections between the
                # where clauses won't exclude valid results.
                self.where.add(EverythingNode(), AND)
        elif self.where:
            # rhs has an empty where clause.
            w = self.where_class()
            w.add(EverythingNode(), AND)
        else:
            # Both where clauses are empty: combine two empty constraints.
            w = self.where_class()
        self.where.add(w, connector)
        # Selection columns and extra extensions are those provided by 'rhs'.
        self.select = []
        for col, field in rhs.select:
            if isinstance(col, (list, tuple)):
                # (alias, column) pair: only the alias needs relabeling.
                new_col = change_map.get(col[0], col[0]), col[1]
                self.select.append(SelectInfo(new_col, field))
            else:
                new_col = col.relabeled_clone(change_map)
                self.select.append(SelectInfo(new_col, field))
        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self._extra and rhs._extra:
                raise ValueError("When merging querysets using 'or', you "
                                 "cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables
        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by
    def deferred_to_data(self, target, callback):
        """
        Converts the self.deferred_loading data structure to an alternate data
        structure, describing the field that *will* be loaded. This is used to
        compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.
        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair need to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        orig_opts = self.get_meta()
        seen = {}
        # The pk of the starting model must always be loaded.
        must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model._meta.concrete_model
            opts = orig_opts
            # Walk the relation chain up to (but excluding) the final field.
            for name in parts[:-1]:
                old_model = cur_model
                source = opts.get_field_by_name(name)[0]
                if is_reverse_o2o(source):
                    cur_model = source.model
                else:
                    cur_model = source.rel.to
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # (if it's not a reverse relation) to the things we select.
                if not is_reverse_o2o(source):
                    must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field, model, _, _ = opts.get_field_by_name(parts[-1])
            if model is None:
                model = cur_model
            if not is_reverse_o2o(field):
                add_to_dict(seen, model, field)
        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The only
            # slight complexity here is handling fields that exist on parent
            # models.
            workset = {}
            for model, values in six.iteritems(seen):
                for field, m in model._meta.get_fields_with_model():
                    if field in values:
                        continue
                    add_to_dict(workset, m or model, field)
            for model, values in six.iteritems(must_include):
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in six.iteritems(workset):
                callback(target, model, values)
        else:
            for model, values in six.iteritems(must_include):
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                if model not in seen:
                    seen[model] = set()
            for model, values in six.iteritems(seen):
                callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
    def promote_joins(self, aliases):
        """
        Recursively promotes the join type of the given aliases and their
        children to an outer join. A join is promoted when it is nullable or
        its parent join is already an outer join.
        The children promotion is done to avoid join chains that contain a LOUTER
        b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
        then we must also promote b->c automatically, or otherwise the promotion
        of a->b doesn't actually change anything in the query results.
        """
        # Worklist algorithm: promoting a join may require re-examining the
        # joins that hang off it, so process until the list drains.
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_cols[0][1] is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].lhs_alias
            parent_louter = (
                parent_alias
                and self.alias_map[parent_alias].join_type == self.LOUTER)
            already_louter = self.alias_map[alias].join_type == self.LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                data = self.alias_map[alias]._replace(join_type=self.LOUTER)
                self.alias_map[alias] = data
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map.keys()
                    if (self.alias_map[join].lhs_alias == alias
                        and join not in aliases))
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results. .
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == self.LOUTER:
self.alias_map[alias] = self.alias_map[alias]._replace(join_type=self.INNER)
parent_alias = self.alias_map[alias].lhs_alias
if self.alias_map[parent_alias].join_type == self.INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
if self.group_by:
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [SelectInfo(relabel_column(s.col), s.field)
for s in self.select]
if self._aggregates:
self._aggregates = OrderedDict(
(key, relabel_column(col)) for key, col in self._aggregates.items())
# 2. Rename the alias in the internal table/alias datastructures.
for ident, aliases in self.join_map.items():
del self.join_map[ident]
aliases = tuple(change_map.get(a, a) for a in aliases)
ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
self.join_map[ident] = aliases
for old_alias, new_alias in six.iteritems(change_map):
alias_data = self.alias_map[old_alias]
alias_data = alias_data._replace(rhs_alias=new_alias)
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = alias_data
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in six.iteritems(self.alias_map):
lhs = data.lhs_alias
if lhs in change_map:
data = data._replace(lhs_alias=change_map[lhs])
self.alias_map[alias] = data
    def bump_prefix(self, outer_query):
        """
        Changes the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """
        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return
        # Advance to the next letter not already used by a subquery.
        self.alias_prefix = chr(ord(self.alias_prefix) + 1)
        while self.alias_prefix in self.subq_aliases:
            self.alias_prefix = chr(ord(self.alias_prefix) + 1)
        # Going past 'Z' would mean the single-letter prefix space is
        # exhausted (an unreasonably deep subquery nesting).
        assert self.alias_prefix < 'Z'
        self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
        outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
        # Renumber every table under the new prefix, then rewrite all
        # references to the old aliases.
        change_map = OrderedDict()
        for pos, alias in enumerate(self.tables):
            new_alias = '%s%d' % (self.alias_prefix, pos)
            change_map[alias] = new_alias
            self.tables[pos] = new_alias
        self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
    def join(self, connection, reuse=None, nullable=False, join_field=None):
        """
        Returns an alias for the join in 'connection', either reusing an
        existing alias for that join or creating a new one. 'connection' is a
        tuple (lhs, table, join_cols) where 'lhs' is either an existing
        table alias or a table name. 'join_cols' is a tuple of tuples containing
        columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
        to the SQL equivalent of::
            lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
        The 'reuse' parameter can be either None which means all joins
        (matching the connection) are reusable, or it can be a set containing
        the aliases that can be reused.
        A join is always created as LOUTER if the lhs alias is LOUTER to make
        sure we do not generate chains like t1 LOUTER t2 INNER t3. All new
        joins are created as LOUTER if nullable is True.
        If 'nullable' is True, the join can potentially involve NULL values and
        is a candidate for promotion (to "left outer") when combining querysets.
        The 'join_field' is the field we are joining along (if any).
        """
        lhs, table, join_cols = connection
        # Only the base table (lhs is None) may omit the join field.
        assert lhs is None or join_field is not None
        existing = self.join_map.get(connection, ())
        if reuse is None:
            reuse = existing
        else:
            reuse = [a for a in existing if a in reuse]
        for alias in reuse:
            if join_field and self.alias_map[alias].join_field != join_field:
                # The join_map doesn't contain join_field (mainly because
                # fields in Query structs are problematic in pickling), so
                # check that the existing join is created using the same
                # join_field used for the under work join.
                continue
            self.ref_alias(alias)
            return alias
        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(table, create=True)
        if not lhs:
            # Not all tables need to be joined to anything. No join type
            # means the later columns are ignored.
            join_type = None
        elif self.alias_map[lhs].join_type == self.LOUTER or nullable:
            join_type = self.LOUTER
        else:
            join_type = self.INNER
        join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
                        join_field)
        self.alias_map[alias] = join
        # Record the new alias under its connection for later reuse checks.
        if connection in self.join_map:
            self.join_map[connection] += (alias,)
        else:
            self.join_map[connection] = (alias,)
        return alias
def setup_inherited_models(self):
    """
    Ensure that every parent model of the QuerySet's model has its table
    joined into the query.

    Runs as a separate step (from pre_sql_setup()) so that subclasses know
    which tables will be active before select columns are computed as a
    side effect of as_sql(). The resulting model -> alias mapping is
    stored on self.included_inherited_models.
    """
    meta = self.get_meta()
    base_alias = self.tables[0]
    # Seed with None -> root alias, as join_parent_model() requires.
    joined = {None: base_alias}
    for _field, parent_model in meta.get_fields_with_model():
        if parent_model in joined:
            continue
        self.join_parent_model(meta, parent_model, base_alias, joined)
    self.included_inherited_models = joined
def join_parent_model(self, opts, model, alias, seen):
    """
    Makes sure the given 'model' is joined in the query. If 'model' isn't
    a parent of 'opts' or if it is None this method is a no-op.

    The 'alias' is the root alias for starting the join, 'seen' is a dict
    of model -> alias of existing joins. It must also contain a mapping
    of None -> some alias. This will be returned in the no-op case.
    """
    if model in seen:
        # Already joined; reuse the recorded alias.
        return seen[model]
    chain = opts.get_base_chain(model)
    if chain is None:
        # 'model' is not an ancestor of 'opts': nothing to join.
        return alias
    curr_opts = opts
    for int_model in chain:
        if int_model in seen:
            return seen[int_model]
        # Proxy models have elements in the base chain with no parents:
        # just adopt the proxied model's options and move to the next
        # base in that case (no join is needed for a proxy).
        if not curr_opts.parents[int_model]:
            curr_opts = int_model._meta
            continue
        # Join one inheritance step via the parent link field.
        link_field = curr_opts.get_ancestor_link(int_model)
        _, _, _, joins, _ = self.setup_joins(
            [link_field.name], curr_opts, alias)
        curr_opts = int_model._meta
        # Record the alias both locally and in 'seen' for reuse.
        alias = seen[int_model] = joins[-1]
    # Fall back to the None -> alias mapping when no join was created.
    return alias or seen[None]
def remove_inherited_models(self):
    """
    Undo the effects of setup_inherited_models() by unreferencing every
    parent-model join. Should be called whenever select columns
    (self.select) are set explicitly.
    """
    for model, joined_alias in self.included_inherited_models.items():
        # The None key marks the root alias, which must stay referenced.
        if model:
            self.unref_alias(joined_alias)
    self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
    """
    Adds a single aggregate expression to the Query.

    'aggregate' is the aggregate object, 'model' the model it applies to,
    'alias' the name the result is exposed under, and 'is_summary' whether
    the aggregate summarises the whole queryset (as opposed to an
    annotation per row).
    """
    opts = model._meta
    field_list = aggregate.lookup.split(LOOKUP_SEP)
    if len(field_list) == 1 and self._aggregates and aggregate.lookup in self.aggregates:
        # Aggregate is over an annotation
        field_name = field_list[0]
        col = field_name
        source = self.aggregates[field_name]
        if not is_summary:
            # Aggregating over another aggregate per-row is not supported.
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                aggregate.name, field_name, field_name))
    elif ((len(field_list) > 1) or
            (field_list[0] not in [i.name for i in opts.fields]) or
            self.group_by is None or
            not is_summary):
        # If:
        # - the field descriptor has more than one part (foo__bar), or
        # - the field descriptor is referencing an m2m/m2o field, or
        # - this is a reference to a model field (possibly inherited), or
        # - this is an annotation over a model field
        # then we need to explore the joins that are required.

        # Join promotion note - we must not remove any rows here, so use
        # outer join if there isn't any existing join.
        _, sources, opts, join_list, path = self.setup_joins(
            field_list, opts, self.get_initial_alias())

        # Process the join chain to see if it can be trimmed
        targets, _, join_list = self.trim_joins(sources, join_list, path)

        col = targets[0].column
        source = sources[0]
        # Reference the column through the final join alias.
        col = (join_list[-1], col)
    else:
        # The simplest cases. No joins required -
        # just reference the provided column alias.
        field_name = field_list[0]
        source = opts.get_field(field_name)
        col = field_name
    # We want to have the alias in SELECT clause even if mask is set.
    self.append_aggregate_mask([alias])

    # Add the aggregate to the query
    aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def prepare_lookup_value(self, value, lookups, can_reuse):
    """
    Normalise a filter value and its lookup list before building the
    lookup. Returns the (possibly replaced) value and the (possibly
    mutated) lookups list.
    """
    # Default lookup if none given is exact.
    if len(lookups) == 0:
        lookups = ['exact']
    # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
    # uses of None as a query value.
    if value is None:
        if lookups[-1] not in ('exact', 'iexact'):
            raise ValueError("Cannot use None as a query value")
        lookups[-1] = 'isnull'
        value = True
    elif callable(value):
        # Calling the value here preserves historic behaviour while the
        # deprecation runs its course.
        warnings.warn(
            "Passing callable arguments to queryset is deprecated.",
            RemovedInDjango19Warning, stacklevel=2)
        value = value()
    elif isinstance(value, ExpressionNode):
        # If value is a query expression, evaluate it
        value = SQLEvaluator(value, self, reuse=can_reuse)
    # Subqueries (and evaluators wrapping them) need their aliases bumped
    # so they cannot clash with this query's aliases.
    if hasattr(value, 'query') and hasattr(value.query, 'bump_prefix'):
        value = value._clone()
        value.query.bump_prefix(self)
    if hasattr(value, 'bump_prefix'):
        value = value.clone()
        value.bump_prefix(self)
    # For Oracle '' is equivalent to null. The check needs to be done
    # at this stage because join promotion can't be done at compiler
    # stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
    # can do here. Similar thing is done in is_nullable(), too.
    if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
            lookups[-1] == 'exact' and value == ''):
        value = True
        lookups[-1] = 'isnull'
    return value, lookups
def solve_lookup_type(self, lookup):
    """
    Solve the lookup type from the lookup (eg: 'foobar__id__icontains').

    Returns a triple (lookup_parts, field_parts, aggregate) where
    'lookup_parts' is the list of lookup/transform names, 'field_parts'
    the field path, and 'aggregate' the referenced aggregate (or False
    when the lookup does not reference one).
    """
    lookup_splitted = lookup.split(LOOKUP_SEP)
    if self._aggregates:
        aggregate, aggregate_lookups = refs_aggregate(lookup_splitted, self.aggregates)
        if aggregate:
            return aggregate_lookups, (), aggregate
    _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
    field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
    if len(lookup_parts) == 0:
        lookup_parts = ['exact']
    elif len(lookup_parts) > 1:
        if not field_parts:
            # Bug fix: the original message contained a stray quote
            # ('... for model %s".') producing mismatched quoting.
            raise FieldError(
                'Invalid lookup "%s" for model %s.' %
                (lookup, self.get_meta().model.__name__))
    return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts):
    """
    Checks whether the object passed while querying is of the correct
    type. Raises ValueError naming the offending object otherwise.
    Objects without a _meta attribute are ignored.
    """
    if not hasattr(value, '_meta'):
        return
    meta = value._meta
    # Accept the exact model, a child of it, or one of its parents.
    if (meta.concrete_model == opts.concrete_model
            or opts.concrete_model in meta.get_parent_list()
            or meta.concrete_model in opts.get_parent_list()):
        return
    raise ValueError(
        'Cannot query "%s": Must be "%s" instance.' %
        (value, opts.object_name))
def check_related_objects(self, field, value, opts):
    """
    Checks the type of object passed to query relations.

    Non-relational fields are a no-op; iterables are validated item by
    item unless they look like a QuerySet, whose model is checked instead.
    """
    if not field.rel:
        return
    if not hasattr(value, '__iter__'):
        # Expecting a single model instance here.
        self.check_query_object_type(value, opts)
        return
    if hasattr(value, 'model') and hasattr(value.model, '_meta'):
        # The iterable carries a model attribute, so it is likely a
        # QuerySet; validate its model rather than iterating it.
        qs_model = value.model
        if not (qs_model == opts.concrete_model
                or opts.concrete_model in qs_model._meta.get_parent_list()
                or qs_model in opts.get_parent_list()):
            raise ValueError(
                'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
                (qs_model._meta.model_name, opts.object_name))
        return
    for item in value:
        self.check_query_object_type(item, opts)
def build_lookup(self, lookups, lhs, rhs):
    """
    Resolve the chain of lookup/transform names in 'lookups' against
    'lhs', returning an instantiated final lookup with 'rhs' as its
    right-hand side. Raises FieldError for an unknown name.
    """
    pending = lookups[:]
    while pending:
        name = pending[0]
        if len(pending) == 1:
            lookup_class = lhs.get_lookup(name)
            if lookup_class:
                return lookup_class(lhs, rhs)
            # No direct lookup found: retry the name as a transform
            # followed by an implicit 'exact'.
            pending.append('exact')
        transform = lhs.get_transform(name)
        if not transform:
            raise FieldError(
                "Unsupported lookup '%s' for %s or join on the field not "
                "permitted." %
                (name, lhs.output_field.__class__.__name__))
        lhs = transform(lhs, pending)
        pending = pending[1:]
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
                 can_reuse=None, connector=AND):
    """
    Builds a WhereNode for a single filter clause, but doesn't add it
    to this Query. Query.add_q() will then add this filter to the where
    or having Node.

    The 'branch_negated' tells us if the current branch contains any
    negations. This will be used to determine if subqueries are needed.

    The 'current_negated' is used to determine if the current filter is
    negated or not and this will be used to determine if IS NULL filtering
    is needed.

    The difference between current_negated and branch_negated is that
    branch_negated is set on first negation, but current_negated is
    flipped for each negation.

    Note that add_filter will not do any negating itself, that is done
    upper in the code by add_q().

    The 'can_reuse' is a set of reusable joins for multijoins.

    The method will create a filter clause that can be added to the current
    query. However, if the filter isn't added to the query then the caller
    is responsible for unreffing the joins used.
    """
    arg, value = filter_expr
    if not arg:
        raise FieldError("Cannot parse keyword query %r" % arg)
    lookups, parts, reffed_aggregate = self.solve_lookup_type(arg)

    # Work out the lookup type and remove it from the end of 'parts',
    # if necessary.
    value, lookups = self.prepare_lookup_value(value, lookups, can_reuse)
    used_joins = getattr(value, '_used_joins', [])

    clause = self.where_class()
    if reffed_aggregate:
        # The filter references an aggregate - it belongs in HAVING and
        # needs no join processing.
        condition = self.build_lookup(lookups, reffed_aggregate, value)
        if not condition:
            # Backwards compat for custom lookups
            assert len(lookups) == 1
            condition = (reffed_aggregate, lookups[0], value)
        clause.add(condition, AND)
        return clause, []

    opts = self.get_meta()
    alias = self.get_initial_alias()
    allow_many = not branch_negated

    try:
        field, sources, opts, join_list, path = self.setup_joins(
            parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)

        self.check_related_objects(field, value, opts)

        # split_exclude() needs to know which joins were generated for the
        # lookup parts
        self._lookup_joins = join_list
    except MultiJoin as e:
        # A reverse many-valued join in a negated branch: rewrite as a
        # subquery instead.
        return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                                  can_reuse, e.names_with_path)

    if can_reuse is not None:
        can_reuse.update(join_list)
    used_joins = set(used_joins).union(set(join_list))

    # Process the join list to see if we can remove any non-needed joins from
    # the far end (fewer tables in a query is better).
    targets, alias, join_list = self.trim_joins(sources, join_list, path)

    if hasattr(field, 'get_lookup_constraint'):
        # For now foreign keys get special treatment. This should be
        # refactored when composite fields lands.
        condition = field.get_lookup_constraint(self.where_class, alias, targets, sources,
                                                lookups, value)
        lookup_type = lookups[-1]
    else:
        assert(len(targets) == 1)
        col = Col(alias, targets[0], field)
        condition = self.build_lookup(lookups, col, value)
        if not condition:
            # Backwards compat for custom lookups
            if lookups[0] not in self.query_terms:
                raise FieldError(
                    "Join on field '%s' not permitted. Did you "
                    "misspell '%s' for the lookup type?" %
                    (col.output_field.name, lookups[0]))
            if len(lookups) > 1:
                raise FieldError("Nested lookup '%s' not supported." %
                                 LOOKUP_SEP.join(lookups))
            condition = (Constraint(alias, targets[0].column, field), lookups[0], value)
            lookup_type = lookups[-1]
        else:
            lookup_type = condition.lookup_name

    clause.add(condition, AND)

    # Decide whether the joins used by this filter must be promoted to
    # LEFT OUTER so NULL rows survive (used by the caller's join voting).
    require_outer = lookup_type == 'isnull' and value is True and not current_negated
    if current_negated and (lookup_type != 'isnull' or value is False):
        require_outer = True
        if (lookup_type != 'isnull' and (
                self.is_nullable(targets[0]) or
                self.alias_map[join_list[-1]].join_type == self.LOUTER)):
            # The condition added here will be SQL like this:
            # NOT (col IS NOT NULL), where the first NOT is added in
            # upper layers of code. The reason for addition is that if col
            # is null, then col != someval will result in SQL "unknown"
            # which isn't the same as in Python. The Python None handling
            # is wanted, and it can be gotten by
            # (col IS NULL OR col != someval)
            #   <=>
            # NOT (col IS NOT NULL AND col = someval).
            lookup_class = targets[0].get_lookup('isnull')
            clause.add(lookup_class(Col(alias, targets[0], sources[0]), False), AND)
    return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
    """Add a single (lookup, value) pair by wrapping it in a Q object."""
    lookup, value = filter_clause
    self.add_q(Q(**{lookup: value}))
def need_having(self, obj):
    """
    Returns whether or not all elements of this q_object need to be put
    together in the HAVING clause.
    """
    # Without aggregates nothing can belong in HAVING.
    if not self._aggregates:
        return False
    if isinstance(obj, Node):
        # A tree node needs HAVING if any of its children does.
        return any(self.need_having(c) for c in obj.children)
    # Leaf (lookup, value): HAVING is needed when the lookup path or the
    # value references an aggregate.
    return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)[0]
            or (hasattr(obj[1], 'contains_aggregate')
                and obj[1].contains_aggregate(self.aggregates)))
def split_having_parts(self, q_object, negated=False):
    """
    Returns a list of q_objects which need to go into the having clause
    instead of the where clause. Removes the splitted out nodes from the
    given q_object. Note that the q_object is altered, so cloning it is
    needed.
    """
    having_parts = []
    # Iterate over a copy: children are removed from q_object in place.
    for c in q_object.children[:]:
        # When constructing the having nodes we need to take care to
        # preserve the negation status from the upper parts of the tree
        if isinstance(c, Node):
            # For each negated child, flip the in_negated flag.
            in_negated = c.negated ^ negated
            if c.connector == OR and self.need_having(c):
                # A subtree starting from OR clause must go into having in
                # whole if any part of that tree references an aggregate.
                q_object.children.remove(c)
                having_parts.append(c)
                c.negated = in_negated
            else:
                # AND subtree: recurse, collecting only the HAVING bits.
                having_parts.extend(
                    self.split_having_parts(c, in_negated)[1])
        elif self.need_having(c):
            # Leaf referencing an aggregate: wrap it in its own where node
            # carrying the accumulated negation.
            q_object.children.remove(c)
            new_q = self.where_class(children=[c], negated=negated)
            having_parts.append(new_q)
    return q_object, having_parts
def add_q(self, q_object):
    """
    A preprocessor for the internal _add_q(). Responsible for splitting
    the given q_object into where and having parts and setting up some
    internal variables.
    """
    if self.need_having(q_object):
        # split_having_parts() mutates its argument, so work on a clone.
        where_part, having_parts = self.split_having_parts(
            q_object.clone(), q_object.negated)
    else:
        where_part, having_parts = q_object, []
    # For join promotion this case is doing an AND for the added q_object
    # and existing conditions. So, any existing inner join forces the join
    # type to remain inner. Existing outer joins can however be demoted.
    # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
    # rel_a doesn't produce any rows, then the whole condition must fail.
    # So, demotion is OK.)
    existing_inner = {
        alias for alias in self.alias_map
        if self.alias_map[alias].join_type == self.INNER}
    where_clause, _ = self._add_q(where_part, self.used_aliases)
    self.where.add(where_clause, AND)
    for having in having_parts:
        having_clause, _ = self._add_q(having, self.used_aliases)
        self.having.add(having_clause, AND)
    self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
           current_negated=False):
    """
    Adds a Q-object to the current filter.

    Recursively builds a where clause from the Q tree, voting on join
    promotion/demotion via a JoinPromoter. Returns the built clause and
    the set of joins that must stay INNER.
    """
    connector = q_object.connector
    # current_negated flips on every negation; branch_negated latches on
    # the first one (see build_filter's docstring).
    current_negated = current_negated ^ q_object.negated
    branch_negated = branch_negated or q_object.negated
    target_clause = self.where_class(connector=connector,
                                     negated=q_object.negated)
    joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
    for child in q_object.children:
        if isinstance(child, Node):
            # Subtree: recurse with the accumulated negation state.
            child_clause, needed_inner = self._add_q(
                child, used_aliases, branch_negated,
                current_negated)
            joinpromoter.add_votes(needed_inner)
        else:
            # Leaf (lookup, value) pair.
            child_clause, needed_inner = self.build_filter(
                child, can_reuse=used_aliases, branch_negated=branch_negated,
                current_negated=current_negated, connector=connector)
            joinpromoter.add_votes(needed_inner)
        target_clause.add(child_clause, connector)
    # Resolve the collected votes into actual join type changes.
    needed_inner = joinpromoter.update_join_types(self)
    return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
    """
    Walks the names path and turns them PathInfo tuples. Note that a
    single name in 'names' can generate multiple PathInfos (m2m for
    example).

    'names' is the path of names to travel, 'opts' is the model Options we
    start the name resolving from, 'allow_many' is as for setup_joins().

    Returns a list of PathInfo tuples. In addition returns the final field
    (the last used join field), and target (which is a field guaranteed to
    contain the same value as the final field).
    """
    path, names_with_path = [], []
    for pos, name in enumerate(names):
        cur_names_with_path = (name, [])
        if name == 'pk':
            name = opts.pk.name
        try:
            field, model, direct, m2m = opts.get_field_by_name(name)
        except FieldDoesNotExist:
            # We didn't find the current field, so move position back
            # one step. ('pos' and 'name' deliberately leak out of the
            # loop and are inspected after it.)
            pos -= 1
            break
        # Check if we need any joins for concrete inheritance cases (the
        # field lives in parent, but we are currently in one of its
        # children)
        if model:
            # The field lives on a base class of the current model.
            # Skip the chain of proxy to the concrete proxied model
            proxied_model = opts.concrete_model
            for int_model in opts.get_base_chain(model):
                if int_model is proxied_model:
                    opts = int_model._meta
                else:
                    final_field = opts.parents[int_model]
                    targets = (final_field.rel.get_related_field(),)
                    opts = int_model._meta
                    path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
                    cur_names_with_path[1].append(
                        PathInfo(final_field.model._meta, opts, targets, final_field, False, True)
                    )
        if hasattr(field, 'get_path_info'):
            # Relational field: may expand to several PathInfos (m2m).
            pathinfos = field.get_path_info()
            if not allow_many:
                # Reject reverse many-valued traversal by raising
                # MultiJoin with the path travelled so far.
                for inner_pos, p in enumerate(pathinfos):
                    if p.m2m:
                        cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
                        names_with_path.append(cur_names_with_path)
                        raise MultiJoin(pos + 1, names_with_path)
            last = pathinfos[-1]
            path.extend(pathinfos)
            final_field = last.join_field
            opts = last.to_opts
            targets = last.target_fields
            cur_names_with_path[1].extend(pathinfos)
            names_with_path.append(cur_names_with_path)
        else:
            # Local non-relational field.
            final_field = field
            targets = (field,)
            break
    # pos == -1 means the very first name failed to resolve.
    if pos == -1 or (fail_on_missing and pos + 1 != len(names)):
        self.raise_field_error(opts, name)
    return path, final_field, targets, names[pos + 1:]
def raise_field_error(self, opts, name):
    """Raise a FieldError for 'name', listing every valid choice."""
    choices = opts.get_all_field_names() + list(self.aggregate_select)
    raise FieldError("Cannot resolve keyword %r into field. "
                     "Choices are: %s" % (name, ", ".join(choices)))
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
    """
    Compute the necessary table joins for the passage through the fields
    given in 'names'. 'opts' is the Options class for the current model
    (which gives the table we are starting from), 'alias' is the alias for
    the table to start the joining from.

    The 'can_reuse' defines the reverse foreign key joins we can reuse. It
    can be None in which case all joins are reusable or a set of aliases
    that can be reused. Note that non-reverse foreign keys are always
    reusable when using setup_joins().

    If 'allow_many' is False, then any reverse foreign key seen will
    generate a MultiJoin exception.

    Returns the final field involved in the joins, the target field (used
    for any 'where' constraint), the final 'opts' value, the joins and the
    field path travelled to generate the joins.

    The target field is the field containing the concrete value. Final
    field can be something different, for example foreign key pointing to
    that value. Final field is needed for example in some value
    conversions (convert 'obj' in fk__id=obj to pk val using the foreign
    key field for example).
    """
    joins = [alias]
    # First, generate the path for the names
    path, final_field, targets, rest = self.names_to_path(
        names, opts, allow_many, fail_on_missing=True)

    # Then, add the path to the query's joins. Note that we can't trim
    # joins at this stage - we will need the information about join type
    # of the trimmed joins.
    for pos, join in enumerate(path):
        opts = join.to_opts
        if join.direct:
            nullable = self.is_nullable(join.join_field)
        else:
            # Reverse joins may have no matching row on the other side.
            nullable = True
        connection = alias, opts.db_table, join.join_field.get_joining_columns()
        # Only many-valued (reverse/m2m) joins honour the reuse set.
        reuse = can_reuse if join.m2m else None
        alias = self.join(
            connection, reuse=reuse, nullable=nullable, join_field=join.join_field)
        joins.append(alias)
    if hasattr(final_field, 'field'):
        # Unwrap related-object descriptors down to the real field.
        final_field = final_field.field
    return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
    """
    The 'target' parameter is the final field being joined to, 'joins'
    is the full list of join aliases. The 'path' contain the PathInfos
    used to create the joins.

    Returns the final target field and table alias and the new active
    joins.

    We will always trim any direct join if we have the target column
    available already in the previous table. Reverse joins can't be
    trimmed as we don't know if there is anything on the other side of
    the join.
    """
    # Work on a copy so the caller's list is untouched.
    joins = joins[:]
    # Walk the path from the far end towards the root.
    for pos, info in enumerate(reversed(path)):
        if len(joins) == 1 or not info.direct:
            # Nothing left to trim, or a reverse join blocks trimming.
            break
        join_targets = set(t.column for t in info.join_field.foreign_related_fields)
        cur_targets = set(t.column for t in targets)
        if not cur_targets.issubset(join_targets):
            # The needed columns are not all present on the near side.
            break
        # Map each target onto its equivalent column in the previous
        # table, then drop (and unref) the now-unneeded join.
        targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
        self.unref_alias(joins.pop())
    return targets, joins[-1], joins
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
    """
    When doing an exclude against any kind of N-to-many relation, we need
    to use a subquery. This method constructs the nested query, given the
    original exclude filter (filter_expr) and the portion up to the first
    N-to-many relation field.

    As an example we could have original filter ~Q(child__name='foo').
    We would get here with filter_expr = child__name, prefix = child and
    can_reuse is a set of joins usable for filters in the original query.

    We will turn this into equivalent of:
        WHERE NOT (pk IN (SELECT parent_id FROM thetable
                          WHERE name = 'foo' AND parent_id IS NOT NULL))

    It might be worth it to consider using WHERE NOT EXISTS as that has
    saner null handling, and is easier for the backend's optimizer to
    handle.
    """
    # Generate the inner query.
    query = Query(self.model)
    query.add_filter(filter_expr)
    query.clear_ordering(True)
    # Try to have as simple as possible subquery -> trim leading joins from
    # the subquery.
    trimmed_prefix, contains_louter = query.trim_start(names_with_path)
    query.remove_inherited_models()

    # Add extra check to make sure the selected field will not be null
    # since we are adding an IN <subquery> clause. This prevents the
    # database from tripping over IN (...,NULL,...) selects and returning
    # nothing
    alias, col = query.select[0].col
    if self.is_nullable(query.select[0].field):
        lookup_class = query.select[0].field.get_lookup('isnull')
        lookup = lookup_class(Col(alias, query.select[0].field, query.select[0].field), False)
        query.where.add(lookup, AND)
    if alias in can_reuse:
        select_field = query.select[0].field
        pk = select_field.model._meta.pk
        # Need to add a restriction so that outer query's filters are in effect for
        # the subquery, too.
        query.bump_prefix(self)
        lookup_class = select_field.get_lookup('exact')
        # Correlate the subquery's pk with the outer query's alias.
        lookup = lookup_class(Col(query.select[0].col[0], pk, pk),
                              Col(alias, pk, pk))
        query.where.add(lookup, AND)

    condition, needed_inner = self.build_filter(
        ('%s__in' % trimmed_prefix, query),
        current_negated=True, branch_negated=True, can_reuse=can_reuse)
    if contains_louter:
        or_null_condition, _ = self.build_filter(
            ('%s__isnull' % trimmed_prefix, True),
            current_negated=True, branch_negated=True, can_reuse=can_reuse)
        condition.add(or_null_condition, OR)
        # Note that the end result will be:
        # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
        # This might look crazy but due to how IN works, this seems to be
        # correct. If the IS NOT NULL check is removed then outercol NOT
        # IN will return UNKNOWN. If the IS NULL check is removed, then if
        # outercol IS NULL we will not match the row.
    return condition, needed_inner
def set_empty(self):
    """Force the query to match no rows by emptying both clauses."""
    self.where, self.having = EmptyWhere(), EmptyWhere()
def is_empty(self):
    """Report whether set_empty() has forced this query to match nothing."""
    return any(isinstance(clause, EmptyWhere)
               for clause in (self.where, self.having))
def set_limits(self, low=None, high=None):
    """
    Adjusts the limits on the rows retrieved. We use low/high to set
    these, as it makes it more Pythonic to read and write. When the SQL
    query is created, they are converted to the appropriate offset and
    limit values.

    Any limits passed in here are applied relative to the existing
    constraints. So low is added to the current low value and both will
    be clamped to any existing high value.
    """
    if high is not None:
        candidate_high = self.low_mark + high
        self.high_mark = (candidate_high if self.high_mark is None
                          else min(self.high_mark, candidate_high))
    if low is not None:
        candidate_low = self.low_mark + low
        self.low_mark = (candidate_low if self.high_mark is None
                         else min(self.high_mark, candidate_low))
def clear_limits(self):
    """Reset any slicing limits back to the unbounded defaults."""
    self.low_mark = 0
    self.high_mark = None
def can_filter(self):
    """
    Returns True if adding filters to this instance is still possible,
    i.e. no limits or offsets have been put on the results.
    """
    return self.high_mark is None and not self.low_mark
def clear_select_clause(self):
    """Remove every field from the SELECT clause, including masks."""
    self.select = []
    self.default_cols = False
    self.select_related = False
    # Reset both masks to empty tuples (not None) so nothing is selected.
    self.set_extra_mask(())
    self.set_aggregate_mask(())
def clear_select_fields(self):
    """
    Clears the list of fields to select (but not extra_select columns).
    Some queryset types completely replace any existing list of select
    columns.
    """
    self.select = []
def add_distinct_fields(self, *field_names):
    """Record the given fields for the query's DISTINCT ON clause."""
    self.distinct_fields = field_names
    self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
    """
    Adds the given (model) fields to the select set. The field names are
    added in the order specified.
    """
    alias = self.get_initial_alias()
    opts = self.get_meta()

    try:
        for name in field_names:
            # Join promotion note - we must not remove any rows here, so
            # if there is no existing joins, use outer join.
            _, targets, _, joins, path = self.setup_joins(
                name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
            targets, final_alias, joins = self.trim_joins(targets, joins, path)
            for target in targets:
                self.select.append(SelectInfo((final_alias, target.column), target))
    # The except handlers rely on 'name' leaking from the loop: it holds
    # the field name that failed.
    except MultiJoin:
        raise FieldError("Invalid field name: '%s'" % name)
    except FieldError:
        if LOOKUP_SEP in name:
            # For lookups spanning over relationships, show the error
            # from the model on which the lookup failed.
            raise
        else:
            names = sorted(opts.get_all_field_names() + list(self.extra)
                           + list(self.aggregate_select))
            raise FieldError("Cannot resolve keyword %r into field. "
                             "Choices are: %s" % (name, ", ".join(names)))
    self.remove_inherited_models()
def add_ordering(self, *ordering):
    """
    Adds items from the 'ordering' sequence to the query's "order by"
    clause. These items are either field names (not column names) --
    possibly with a direction prefix ('-' or '?') -- or ordinals,
    corresponding to column positions in the 'select' list.

    If 'ordering' is empty, all ordering is cleared from the query.
    """
    invalid = [item for item in ordering if not ORDER_PATTERN.match(item)]
    if invalid:
        raise FieldError('Invalid order_by arguments: %s' % invalid)
    if ordering:
        self.order_by.extend(ordering)
    else:
        # Empty call disables even the model's default ordering.
        self.default_ordering = False
def clear_ordering(self, force_empty):
    """
    Removes any ordering settings. If 'force_empty' is True, there will be
    no ordering in the resulting query (not even the model's default).
    """
    self.order_by = []
    self.extra_order_by = ()
    if force_empty:
        self.default_ordering = False
def set_group_by(self):
    """
    Expands the GROUP BY clause required by the query: usually the set of
    all non-aggregate fields in the return data. If the backend supports
    grouping by the primary key and the query would be equivalent, that
    optimization is made automatically.
    """
    self.group_by = [col for col, _ in self.select]
def add_count_column(self):
    """
    Converts the query to do count(...) or count(distinct(pk)) in order to
    get its size.
    """
    if not self.distinct:
        if not self.select:
            # No explicit select: COUNT(*) over all rows.
            count = self.aggregates_module.Count('*', is_summary=True)
        else:
            assert len(self.select) == 1, \
                "Cannot add count col with multiple cols in 'select': %r" % self.select
            count = self.aggregates_module.Count(self.select[0].col)
    else:
        opts = self.get_meta()
        if not self.select:
            # COUNT(DISTINCT pk) over the base table.
            count = self.aggregates_module.Count(
                (self.join((None, opts.db_table, None)), opts.pk.column),
                is_summary=True, distinct=True)
        else:
            # Because of SQL portability issues, multi-column, distinct
            # counts need a sub-query -- see get_count() for details.
            assert len(self.select) == 1, \
                "Cannot add count col with multiple cols in 'select'."

            count = self.aggregates_module.Count(self.select[0].col, distinct=True)
        # Distinct handling is done in Count(), so don't do it at this
        # level.
        self.distinct = False

    # Set only aggregate to be the count column.
    # Clear out the select cache to reflect the new unmasked aggregates.
    self._aggregates = {None: count}
    self.set_aggregate_mask(None)
    self.group_by = None
def add_select_related(self, fields):
    """
    Sets up the select_related data structure so that we only select
    certain related models (as opposed to all models, when
    self.select_related=True).
    """
    # A bool value means "all or nothing"; start a fresh tree in that case.
    tree = {} if isinstance(self.select_related, bool) else self.select_related
    for field in fields:
        node = tree
        for part in field.split(LOOKUP_SEP):
            node = node.setdefault(part, {})
    self.select_related = tree
    self.related_select_cols = []
def add_extra(self, select, select_params, where, params, tables, order_by):
    """
    Adds data to the various extra_* attributes for user-created additions
    to the query.
    """
    if select:
        # We need to pair any placeholder markers in the 'select'
        # dictionary with their parameters in 'select_params' so that
        # subsequent updates to the select dictionary also adjust the
        # parameters appropriately.
        select_pairs = OrderedDict()
        if select_params:
            param_iter = iter(select_params)
        else:
            param_iter = iter([])
        for name, entry in select.items():
            entry = force_text(entry)
            entry_params = []
            # Consume one parameter for every '%s' placeholder found.
            pos = entry.find("%s")
            while pos != -1:
                entry_params.append(next(param_iter))
                pos = entry.find("%s", pos + 2)
            select_pairs[name] = (entry, entry_params)
        # This is order preserving, since self.extra_select is an OrderedDict.
        self.extra.update(select_pairs)
    if where or params:
        self.where.add(ExtraWhere(where, params), AND)
    if tables:
        self.extra_tables += tuple(tables)
    if order_by:
        self.extra_order_by = order_by
def clear_deferred_loading(self):
    """Reset deferred loading to its default: nothing deferred."""
    self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
    """
    Add the given list of model field names to the set of fields to
    exclude from loading from the database when automatic column selection
    is done. The new field names are added to any existing field names
    that are deferred (or removed from any existing field names that are
    marked as the only ones for immediate loading).
    """
    # Fields on related models are stored in the literal double-underscore
    # format, so that we can use a set datastructure. The foo__bar
    # splitting happens later, when SQL column names are computed.
    existing, defer = self.deferred_loading
    if defer:
        # Deferral mode: grow the deferred-name set.
        updated = existing.union(field_names)
    else:
        # Immediate-load mode: drop the names from the load set.
        updated = existing.difference(field_names)
    self.deferred_loading = updated, bool(defer)
def add_immediate_loading(self, field_names):
    """
    Add the given list of model field names to the set of fields to
    retrieve when the SQL is executed ("immediate loading" fields). The
    field names replace any existing immediate loading field names. If
    there are field names already specified for deferred loading, those
    names are removed from the new field_names before storing the new
    names for immediate loading. (That is, immediate loading overrides
    any existing immediate values, but respects existing deferrals.)
    """
    existing, defer = self.deferred_loading
    wanted = set(field_names)
    if 'pk' in wanted:
        # Normalise the 'pk' shorthand to the real primary-key name.
        wanted.remove('pk')
        wanted.add(self.get_meta().pk.name)

    if defer:
        # Remove any existing deferred names from the current set before
        # setting the new names.
        self.deferred_loading = wanted.difference(existing), False
    else:
        # Replace any existing "immediate load" field names.
        self.deferred_loading = wanted, False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = set(f.name for f in fields)
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def append_aggregate_mask(self, names):
if self.aggregate_select_mask is not None:
self.set_aggregate_mask(set(names).union(self.aggregate_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT,
we don't actually remove them from the Query since they might be used
later
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
@property
def aggregate_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif not self._aggregates:
return {}
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = OrderedDict(
(k, v) for k, v in self.aggregates.items()
if k in self.aggregate_select_mask
)
return self._aggregate_select_cache
else:
return self.aggregates
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
    def trim_start(self, names_with_path):
        """
        Trims joins from the start of the join path. The candidates for trim
        are the PathInfos in names_with_path structure that are m2m joins.
        Also sets the select column so the start matches the join.
        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().
        Returns a lookup usable for doing outerq.filter(lookup=self). Returns
        also if the joins in the prefix contain a LEFT OUTER join.
        """
        # Flatten the per-name path lists into one sequence of PathInfos.
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
        # Walk the path until the first m2m join; every join before it can be
        # trimmed (its alias reference count is released below).
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == self.LOUTER:
                contains_louter = True
            self.unref_alias(lookup_tables[trimmed_paths])
        # The path.join_field is a Rel, lets get the other side's field
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(
            join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Lets still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for LEFT JOINs because we would
        # miss those rows that have nothing on the outer side.
        if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != self.LOUTER:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            # Keep any extra join condition the field defines (e.g. generic
            # relations) as a WHERE restriction on the subquery.
            extra_restriction = join_field.get_extra_restriction(
                self.where_class, None, lookup_tables[trimmed_paths + 1])
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of the
            # inner query if it happens to have a longer join chain containing the
            # values in select_fields. Lets punt this one for now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
        return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
and field.empty_strings_allowed):
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
    """
    Return the (field name, direction) pair for an order specification; for
    example, '-foo' becomes ('foo', 'DESC').
    The 'default' param indicates which way an unprefixed (or '+'-prefixed)
    name should sort; a '-' prefix always sorts the opposite way.
    """
    same_dir, opposite_dir = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], opposite_dir
    return field, same_dir
def add_to_dict(data, key, value):
    """
    Insert ``value`` into the set stored at ``data[key]``, creating the set
    first if ``key`` is not yet present.
    """
    data.setdefault(key, set()).add(value)
def is_reverse_o2o(field):
    """
    A little helper to check if the given field is a reverse one-to-one. The
    argument is expected to be a relation field or a related object: related
    objects have no 'rel' attribute, and a unique underlying field makes the
    reverse relation one-to-one.
    """
    if hasattr(field, 'rel'):
        return False
    return field.field.unique
def alias_diff(refcounts_before, refcounts_after):
    """
    Given before/after copies of alias refcounts, return the set of aliases
    that were added in the after copy.
    """
    # -1 as the default so an alias that was created and then trimmed still
    # counts as added.
    return {
        alias for alias, count in refcounts_after.items()
        if count > refcounts_before.get(alias, -1)
    }
class JoinPromoter(object):
    """
    Abstracts away the join promotion/demotion decisions that arise when
    compiling complex (possibly negated) filter conditions.
    """
    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        # Under negation the effective connector flips (De Morgan): the
        # promotion rules for NOT (a AND b) match those of (a OR b).
        if not negated:
            self.effective_connector = connector
        elif connector == AND:
            self.effective_connector = OR
        else:
            self.effective_connector = AND
        self.num_children = num_children
        # Per-alias counters: how many children require the join as
        # outer / inner respectively.
        self.outer_votes = {}
        self.inner_votes = {}
    def add_votes(self, inner_votes):
        """Record one inner-join vote per alias in the given iterable."""
        for alias in inner_votes:
            self.inner_votes[alias] = self.inner_votes.get(alias, 0) + 1
    def update_join_types(self, query):
        """
        Change join types on ``query`` so the generated SQL is as efficient
        as possible while staying correct: demote as many joins as possible
        to INNER, but never make an OUTER join INNER if that could remove
        valid results.
        """
        promote = set()
        demote = set()
        for alias, votes in self.inner_votes.items():
            # OR case: unless every child requires the join, an INNER JOIN
            # could itself filter out rows another child would have matched
            # (e.g. rel_a__col=1 | rel_b__col=2 where rel_a has no row but
            # rel_b matches), so promote the join to LEFT OUTER. A later
            # demotion may still undo this.
            if self.effective_connector == 'OR' and votes < self.num_children:
                promote.add(alias)
            # AND case: one child needing the join suffices to demote it to
            # INNER -- a NULL join row could never satisfy that child's
            # condition (NULL=anything is false).
            # OR case: demotion is safe only when *all* children voted for
            # the join (e.g. rel_a__col__icontains=X | rel_a__col__icontains=Y
            # cannot match if rel_a yields no rows).
            if self.effective_connector == 'AND' or (
                    self.effective_connector == 'OR' and
                    votes == self.num_children):
                demote.add(alias)
        # In mixed cases like (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
        # the OR clause first promotes rel_a and rel_b; the AND clause then
        # re-demotes rel_a. That is correct: if rel_a yields no rows the
        # __gte clause cannot hold, so the whole condition is false and an
        # INNER join is safe. The same holds with the clauses swapped.
        query.promote_joins(promote)
        query.demote_joins(demote)
        return demote
| {
"content_hash": "88165a9999008742ead7736f76022694",
"timestamp": "",
"source": "github",
"line_count": 2131,
"max_line_length": 111,
"avg_line_length": 43.991553261379636,
"alnum_prop": 0.5897531628016128,
"repo_name": "pwmarcz/django",
"id": "7524a3582d46d259863aeaa42a5d55cd16b42b51",
"size": "93746",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/models/sql/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.contrib.contenttypes.checks import check_generic_foreign_keys
from django.core import checks
from django.utils.translation import ugettext_lazy as _
class ContentTypesConfig(AppConfig):
    """App configuration for django.contrib.contenttypes."""
    name = 'django.contrib.contenttypes'
    verbose_name = _("Content Types")
    def ready(self):
        """Register the generic-foreign-key system check once the app loads."""
        checks.register('models')(check_generic_foreign_keys)
| {
"content_hash": "9ece9ff58f430b01185b2678be852bc6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.7657430730478589,
"repo_name": "errx/django",
"id": "fbe4fccfa6c45a95d4b6f5a6141d584c7f39b84c",
"size": "397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/contenttypes/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9469402"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from flask import Blueprint
from utils.crossdomains import crossdomain
from utils.config import get_app_configurations
# Load application configuration; provides the directory that holds the
# static assets served by this blueprint.
config = get_app_configurations()
# Blueprint serving the front-end's static files (index.html and assets).
static = Blueprint('static', __name__, static_folder = config['static_dir'])
@static.route('/')
def serve_index():
    """
    Serve the application entry point (index.html) for the root URL.
    """
    return static.send_static_file('index.html')
@static.route('/<path:path>')
def serve_static_page(path):
    """
    Serve the static file at ``path`` from the blueprint's static folder.
    """
    return static.send_static_file(path)
| {
"content_hash": "8c0a594534ffae949aa8c487d33e8e79",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 25.904761904761905,
"alnum_prop": 0.7352941176470589,
"repo_name": "DigitalSlideArchive/PanCanViewer",
"id": "ded723d02e868ab6f244daa9003f1feb7ccd3fdf",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webservice/routes/static.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4554"
},
{
"name": "HTML",
"bytes": "28060"
},
{
"name": "JavaScript",
"bytes": "61956"
},
{
"name": "Jupyter Notebook",
"bytes": "473499"
},
{
"name": "Python",
"bytes": "39030"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.core.paginator import InvalidPage
from django.http import Http404, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404, redirect
from django.utils.http import urlquote
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, TemplateView
from oscar.apps.catalogue.signals import product_viewed
from oscar.core.loading import get_class, get_model
# Resolve models and classes through Oscar's dynamic loading machinery so
# that forked apps can override them.
Product = get_model('catalogue', 'product')
Category = get_model('catalogue', 'category')
ProductAlert = get_model('customer', 'ProductAlert')
ProductAlertForm = get_class('customer.forms', 'ProductAlertForm')
get_product_search_handler_class = get_class(
    'catalogue.search_handlers', 'get_product_search_handler_class')
class ProductDetailView(DetailView):
    """
    Display a single product's detail page.

    Handles canonical-URL enforcement (child-to-parent redirects and slug
    changes), hides non-public products from non-staff users, and fires the
    ``product_viewed`` signal on every successful render.
    """
    context_object_name = 'product'
    model = Product
    view_signal = product_viewed
    template_folder = "catalogue"
    # Whether to redirect to the URL with the right path
    enforce_paths = True
    # Whether to redirect child products to their parent's URL. If it's disabled,
    # we display variant product details on the separate page. Otherwise, details
    # displayed on parent product page.
    enforce_parent = False
    def get(self, request, **kwargs):
        """
        Ensures that the correct URL is used before rendering a response
        """
        self.object = product = self.get_object()
        # BUG FIX: local was previously named ``redirect``, shadowing the
        # django.shortcuts.redirect import at module level.
        redirect_response = self.redirect_if_necessary(request.path, product)
        if redirect_response is not None:
            return redirect_response
        # Do allow staff members so they can test layout etc.
        if not self.is_viewable(product, request):
            raise Http404()
        response = super().get(request, **kwargs)
        self.send_signal(request, response, product)
        return response
    def is_viewable(self, product, request):
        """Return True if ``request.user`` is allowed to see ``product``."""
        return product.is_public or request.user.is_staff
    def get_object(self, queryset=None):
        # Check if self.object is already set to prevent unnecessary DB calls
        if hasattr(self, 'object'):
            return self.object
        else:
            return super().get_object(queryset)
    def redirect_if_necessary(self, current_path, product):
        """Return a permanent redirect to the canonical URL, or None."""
        if self.enforce_parent and product.is_child:
            return HttpResponsePermanentRedirect(
                product.parent.get_absolute_url())
        if self.enforce_paths:
            expected_path = product.get_absolute_url()
            if expected_path != urlquote(current_path):
                return HttpResponsePermanentRedirect(expected_path)
    def get_context_data(self, **kwargs):
        """Add the stock-alert form and alert status to the context."""
        ctx = super().get_context_data(**kwargs)
        ctx['alert_form'] = self.get_alert_form()
        ctx['has_active_alert'] = self.get_alert_status()
        return ctx
    def get_alert_status(self):
        # Check if this user already have an alert for this product
        has_alert = False
        if self.request.user.is_authenticated:
            alerts = ProductAlert.objects.filter(
                product=self.object, user=self.request.user,
                status=ProductAlert.ACTIVE)
            has_alert = alerts.exists()
        return has_alert
    def get_alert_form(self):
        """Build the stock-alert form bound to the current user and product."""
        return ProductAlertForm(
            user=self.request.user, product=self.object)
    def send_signal(self, request, response, product):
        """Emit the ``product_viewed`` signal (analytics / browsing history)."""
        self.view_signal.send(
            sender=self, product=product, user=request.user, request=request,
            response=response)
    def get_template_names(self):
        """
        Return a list of possible templates.
        If an overriding class sets a template name, we use that. Otherwise,
        we try 2 options before defaulting to catalogue/detail.html:
            1). detail-for-upc-<upc>.html
            2). detail-for-class-<classname>.html
        This allows alternative templates to be provided for a per-product
        and a per-item-class basis.
        """
        if self.template_name:
            return [self.template_name]
        return [
            '%s/detail-for-upc-%s.html' % (
                self.template_folder, self.object.upc),
            '%s/detail-for-class-%s.html' % (
                self.template_folder, self.object.get_product_class().slug),
            '%s/detail.html' % self.template_folder]
class CatalogueView(TemplateView):
    """
    Browse all products in the catalogue
    """
    context_object_name = "products"
    template_name = 'catalogue/browse.html'
    def get(self, request, *args, **kwargs):
        """Build the search handler, redirecting to page one on a bad page."""
        try:
            self.search_handler = self.get_search_handler(
                self.request.GET, request.get_full_path(), [])
        except InvalidPage:
            # Redirect to page one.
            messages.error(request, _('The given page number was invalid.'))
            return redirect('catalogue:index')
        return super().get(request, *args, **kwargs)
    def get_search_handler(self, *args, **kwargs):
        """Instantiate the (pluggable) product search handler."""
        return get_product_search_handler_class()(*args, **kwargs)
    def get_context_data(self, **kwargs):
        """Provide the search results and a summary label to the template."""
        ctx = {}
        ctx['summary'] = _("All products")
        search_context = self.search_handler.get_search_context_data(
            self.context_object_name)
        ctx.update(search_context)
        return ctx
class ProductCategoryView(TemplateView):
    """
    Browse products in a given category
    """
    context_object_name = "products"
    template_name = 'catalogue/category.html'
    # Whether to redirect to the category's canonical URL when the
    # requested path (e.g. a stale slug) does not match it.
    enforce_paths = True
    def get(self, request, *args, **kwargs):
        """Fetch the category, enforce its canonical URL, then render."""
        # Fetch the category; return 404 or redirect as needed
        self.category = self.get_category()
        potential_redirect = self.redirect_if_necessary(
            request.path, self.category)
        if potential_redirect is not None:
            return potential_redirect
        try:
            self.search_handler = self.get_search_handler(
                request.GET, request.get_full_path(), self.get_categories())
        except InvalidPage:
            messages.error(request, _('The given page number was invalid.'))
            return redirect(self.category.get_absolute_url())
        return super().get(request, *args, **kwargs)
    def get_category(self):
        """Return the category for the URL's pk, or raise Http404."""
        return get_object_or_404(Category, pk=self.kwargs['pk'])
    def redirect_if_necessary(self, current_path, category):
        """Return a permanent redirect to the canonical category URL, or None."""
        if self.enforce_paths:
            # Categories are fetched by primary key to allow slug changes.
            # If the slug has changed, issue a redirect.
            expected_path = category.get_absolute_url()
            if expected_path != urlquote(current_path):
                return HttpResponsePermanentRedirect(expected_path)
    def get_search_handler(self, *args, **kwargs):
        """Instantiate the (pluggable) product search handler."""
        return get_product_search_handler_class()(*args, **kwargs)
    def get_categories(self):
        """
        Return a list of the current category and its ancestors
        """
        return self.category.get_descendants_and_self()
    def get_context_data(self, **kwargs):
        """Add the category and its search results to the template context."""
        context = super().get_context_data(**kwargs)
        context['category'] = self.category
        search_context = self.search_handler.get_search_context_data(
            self.context_object_name)
        context.update(search_context)
        return context
| {
"content_hash": "7b0fe883237a948be42abdc47853dfed",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 81,
"avg_line_length": 36.88,
"alnum_prop": 0.6404555314533622,
"repo_name": "sasha0/django-oscar",
"id": "d2eb07e9e34117d8c8620ca08530b13443307fb3",
"size": "7376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/oscar/apps/catalogue/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387941"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "518624"
},
{
"name": "JavaScript",
"bytes": "344864"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "1957797"
},
{
"name": "Shell",
"bytes": "1643"
}
],
"symlink_target": ""
} |
from unittest.mock import patch, Mock
import pytest
import yaml
from asphalt.core import command
# Module-level placeholder; tests reference/patch this name as the
# Application class loaded from a config file.
DerivedApplication = None
def test_quickstart_application(monkeypatch, tmpdir, capsys):
    """End-to-end test of the ``quickstart_application`` scaffolding command:
    answers the interactive prompts, then verifies every generated file and
    that a second run fails because the directory already exists."""
    def mock_input(text):
        # Scripted answers for the two interactive prompts.
        if text == 'Project name: ':
            return 'Example Project'
        elif text == 'Top level package name: ':
            return 'example'
        raise ValueError('Unexpected input: ' + text)
    get_distribution = Mock()
    get_distribution('asphalt').parsed_version.public = '1.0.0'
    monkeypatch.setattr('pkg_resources.get_distribution', get_distribution)
    monkeypatch.setattr('builtins.input', mock_input)
    tmpdir.chdir()
    command.quickstart_application()
    # Check that the project directory and the top level package were created
    projectdir = tmpdir.join('Example Project')
    assert projectdir.check(dir=True)
    assert projectdir.join('example').join('__init__.py').check(file=1)
    # Check that example/application.py was properly generated
    with projectdir.join('example').join('application.py').open() as f:
        assert f.read() == """\
from asphalt.core.application import Application
from asphalt.core.context import ApplicationContext
class ExampleProjectApplication(Application):
    @coroutine
    def start(app_ctx: ApplicationContext):
        pass  # IMPLEMENT CUSTOM LOGIC HERE
"""
    with projectdir.join('config.yml').open() as f:
        config_data = f.read()
        # BUG FIX: use safe_load -- yaml.load without an explicit Loader is
        # deprecated and unsafe on untrusted input.
        assert isinstance(yaml.safe_load(config_data), dict)
        assert config_data == """\
---
application: example:ExampleProjectApplication
components:
  foo: {}  # REPLACE ME
settings:
  bar: 1  # REPLACE ME
logging:
  version: 1
  disable_existing_loggers: false
  handlers:
    console:
      class: logging.StreamHandler
      formatter: generic
  formatters:
    generic:
      format: "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
  root:
    handlers: [console]
    level: INFO
"""
    # Check that setup.py was properly generated
    with projectdir.join('setup.py').open() as f:
        assert f.read() == """\
from setuptools import setup
setup(
    name='example',
    version='1.0.0',
    description='Example Project',
    long_description='FILL IN HERE',
    author='FILL IN HERE',
    author_email='FILL IN HERE',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3'
    ],
    zip_safe=True,
    packages=[
        'example'
    ],
    install_requires=[
        'asphalt >= 1.0.0, < 2.0.0'
    ]
)
"""
    # Check that another run will raise an error because the directory exists already
    pytest.raises(SystemExit, command.quickstart_application)
    out, err = capsys.readouterr()
    assert err == 'Error: the directory "Example Project" already exists.\n'
@pytest.mark.parametrize('unsafe', [False, True], ids=['safe', 'unsafe'])
def test_run_from_config_file(tmpdir, unsafe):
    """Verify that run_from_config_file() parses the YAML config and calls the
    configured Application class with components/logging/settings, in both
    safe and unsafe (python-name tag) loading modes."""
    if unsafe:
        # Unsafe mode resolves the class via a YAML python/name tag.
        app_class = '!!python/name:{}.DerivedApplication'.format(__spec__.name)
    else:
        app_class = '{}:DerivedApplication'.format(__spec__.name)
    with patch('{}.DerivedApplication'.format(__spec__.name)) as cls:
        path = tmpdir.join('test.yaml')
        path.write("""\
---
application: {}
components:
  foo: {{}}
  bar: {{}}
settings:
  setting: blah
logging:
  version: 1
  disable_existing_loggers: false
""".format(app_class))
        command.run_from_config_file(str(path), unsafe)
    # The application class must be constructed once with the parsed config
    # sections, and its run() invoked exactly once.
    components = {'foo': {}, 'bar': {}}
    logging = {'version': 1, 'disable_existing_loggers': False}
    settings = {'setting': 'blah'}
    cls.assert_called_once_with(components=components, logging=logging, settings=settings)
    cls().run.assert_called_once_with()
@pytest.mark.parametrize('args, exits', [
    (['asphalt', '--help'], True),
    (['asphalt'], False)
], ids=['help', 'noargs'])
def test_main_help(capsys, args, exits):
    """Both ``--help`` and a bare invocation should print the usage text;
    only ``--help`` makes argparse exit via SystemExit."""
    with patch('sys.argv', args):
        pytest.raises(SystemExit, command.main) if exits else command.main()
    out, err = capsys.readouterr()
    assert out.startswith('usage: asphalt [-h]')
def test_main_run():
    """The ``run`` subcommand should dispatch to run_from_config_file()."""
    args = ['/bogus/path', '--unsafe']
    patch1 = patch('sys.argv', ['asphalt', 'run'] + args)
    patch2 = patch.object(command, 'run_from_config_file')
    with patch1, patch2 as run_from_config_file:
        command.main()
    # BUG FIX: the original used ``assert mock.called_once_with(args)`` which
    # is always truthy (Mock auto-creates the attribute and returns a Mock),
    # so it could never fail. Use the real Mock assertion instead.
    run_from_config_file.assert_called_once()
| {
"content_hash": "ff19d707bb5100cf225c9727cd65ba76",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 94,
"avg_line_length": 29.604026845637584,
"alnum_prop": 0.6474722285196101,
"repo_name": "Siecje/asphalt",
"id": "103d3fda7b741113c0a236a1a48d654a76294063",
"size": "4411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69624"
}
],
"symlink_target": ""
} |
import os
import unittest
from AlphaGo.util import sgf_to_gamestate
from AlphaGo.preprocessing.game_converter import run_game_converter
class TestSGFLoading(unittest.TestCase):
    """Tests for parsing SGF game records via sgf_to_gamestate()."""
    def test_ab_aw(self):
        """An SGF containing handicap placements (AB/AW) should parse without error."""
        with open('tests/test_data/sgf_with_handicap/ab_aw.sgf', 'r') as f:
            sgf_to_gamestate(f.read())
class TestCmdlineConverter(unittest.TestCase):
    """Tests for the game-converter command line entry point."""
    def test_directory_conversion(self):
        """Convert a single directory of SGF files into an HDF5 dataset."""
        args = ['--features', 'board,ones,turns_since',
                '--outfile', '.tmp.testing.h5',
                '--directory', 'tests/test_data/sgf/']
        try:
            run_game_converter(args)
        finally:
            # BUG FIX: clean up the temp file even when conversion raises,
            # so a failed run does not leave .tmp.testing.h5 behind.
            if os.path.exists('.tmp.testing.h5'):
                os.remove('.tmp.testing.h5')
    def test_directory_walk(self):
        """Convert SGF files found by recursively walking a directory tree."""
        args = ['--features', 'board,ones,turns_since',
                '--outfile', '.tmp.testing.h5',
                '--directory', 'tests/test_data', '--recurse']
        try:
            run_game_converter(args)
        finally:
            if os.path.exists('.tmp.testing.h5'):
                os.remove('.tmp.testing.h5')
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| {
"content_hash": "0e04f0fa4fb3688c5bb2fc7a55309d3b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 30.09375,
"alnum_prop": 0.6053997923156802,
"repo_name": "Rochester-NRT/RocAlphaGo",
"id": "8d661558ce9b8e0e8967054c465268bf6190ec4e",
"size": "963",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_game_converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "453141"
}
],
"symlink_target": ""
} |
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import os
import asyncio
import logging
import argparse
import selectors
import multiprocessing
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING, Any, Set, Dict, List, Tuple, Generic, TypeVar, Optional,
cast,
)
from ...common.types import Readables, Writables, SelectableEvents
from ...common.logger import Logger
from ...common.constants import (
DEFAULT_WAIT_FOR_TASKS_TIMEOUT, DEFAULT_SELECTOR_SELECT_TIMEOUT,
DEFAULT_INACTIVE_CONN_CLEANUP_TIMEOUT,
)
if TYPE_CHECKING: # pragma: no cover
from .work import Work
from ..event import EventQueue
T = TypeVar('T')
logger = logging.getLogger(__name__)
class Threadless(ABC, Generic[T]):
"""Work executor base class.
Threadless provides an event loop, which is shared across
multiple :class:`~proxy.core.acceptor.work.Work` instances to handle
work.
Threadless takes input a `work_klass` and an `event_queue`. `work_klass`
must conform to the :class:`~proxy.core.acceptor.work.Work`
protocol. Work is received over the `event_queue`.
When a work is accepted, threadless creates a new instance of `work_klass`.
Threadless will then invoke necessary lifecycle of the
:class:`~proxy.core.acceptor.work.Work` protocol,
allowing `work_klass` implementation to handle the assigned work.
Example, :class:`~proxy.core.base.tcp_server.BaseTcpServerHandler`
implements :class:`~proxy.core.acceptor.work.Work` protocol. It
expects a client connection as work payload and hooks into the
threadless event loop to handle the client connection.
"""
    def __init__(
        self,
        iid: str,
        work_queue: T,
        flags: argparse.Namespace,
        event_queue: Optional['EventQueue'] = None,
    ) -> None:
        """Initialize executor state.

        :param iid: Instance ID identifying this executor.
        :param work_queue: Channel over which new work is delivered.
        :param flags: Parsed flags; provides ``work_klass``,
            ``enable_conn_pool`` and friends.
        :param event_queue: Optional queue for publishing lifecycle events.
        """
        super().__init__()
        self.iid = iid
        self.work_queue = work_queue
        self.flags = flags
        self.event_queue = event_queue
        # Set while the executor's loop is running; cleared to request shutdown.
        self.running = multiprocessing.Event()
        # Active work instances keyed by work id.
        self.works: Dict[int, 'Work[Any]'] = {}
        self.selector: Optional[selectors.DefaultSelector] = None
        # If we remove single quotes for typing hint below,
        # runtime exceptions will occur for < Python 3.9.
        #
        # Ref https://github.com/abhinavsingh/proxy.py/runs/4279055360?check_suite_focus=true
        self.unfinished: Set['asyncio.Task[bool]'] = set()
        self.registered_events_by_work_ids: Dict[
            # work_id
            int,
            # fileno, mask
            SelectableEvents,
        ] = {}
        self.wait_timeout: float = DEFAULT_WAIT_FOR_TASKS_TIMEOUT
        self.cleanup_inactive_timeout: float = DEFAULT_INACTIVE_CONN_CLEANUP_TIMEOUT
        # Total number of works handled so far (monotonic counter).
        self._total: int = 0
        # When put at the top, causes circular import error
        # since integrated ssh tunnel was introduced.
        from ..connection import ( # pylint: disable=C0415
            UpstreamConnectionPool,
        )
        self._upstream_conn_pool: Optional['UpstreamConnectionPool'] = None
        # Filenos currently registered on behalf of the connection pool.
        self._upstream_conn_filenos: Set[int] = set()
        if self.flags.enable_conn_pool:
            self._upstream_conn_pool = UpstreamConnectionPool()
    @property
    @abstractmethod
    def loop(self) -> Optional[asyncio.AbstractEventLoop]:
        """Event loop this executor schedules work on (implementation-specific)."""
        raise NotImplementedError()
    @abstractmethod
    def receive_from_work_queue(self) -> bool:
        """Called when the work queue is ready with new work.

        Implementations receive the work and call ``work_on_tcp_conn``.
        Return True to tear down the loop."""
        raise NotImplementedError()
    @abstractmethod
    def work_queue_fileno(self) -> Optional[int]:
        """Return the work queue's fd if it must be selected before calling
        ``receive_from_work_queue``; return None when no select is needed."""
        raise NotImplementedError()
    @abstractmethod
    def work(self, *args: Any) -> None:
        """Accept a new work payload and begin processing it (implementation-specific)."""
        raise NotImplementedError()
    def create(self, uid: str, *args: Any) -> 'Work[T]':
        """Instantiate ``flags.work_klass`` for a newly accepted work item.

        ``*args`` is forwarded to ``work_klass.create`` to build the payload
        passed as the work's first constructor argument.
        """
        return cast(
            'Work[T]', self.flags.work_klass(
                self.flags.work_klass.create(*args),
                flags=self.flags,
                event_queue=self.event_queue,
                uid=uid,
                upstream_conn_pool=self._upstream_conn_pool,
            ),
        )
    def close_work_queue(self) -> None:
        """Only called if ``work_queue_fileno`` returns an integer.

        If an fd is select-able for work queue, make sure
        to close the work queue fd now. Default implementation is a no-op."""
        pass # pragma: no cover
    async def _update_work_events(self, work_id: int) -> None:
        """Register or modify selector registrations for the descriptors that
        the given work is currently interested in."""
        assert self.selector is not None
        worker_events = await self.works[work_id].get_events()
        # NOTE: Current assumption is that multiple works will not
        # be interested in the same fd. Descriptors of interests
        # returned by work must be unique.
        #
        # TODO: Ideally we must diff and unregister socks not
        # returned of interest within current _select_events call
        # but exists in the registered_socks_by_work_ids registry.
        for fileno in worker_events:
            if work_id not in self.registered_events_by_work_ids:
                self.registered_events_by_work_ids[work_id] = {}
            mask = worker_events[fileno]
            if fileno in self.registered_events_by_work_ids[work_id]:
                # Already registered for this work: only touch the selector
                # when the interest mask actually changed.
                oldmask = self.registered_events_by_work_ids[work_id][fileno]
                if mask != oldmask:
                    self.selector.modify(
                        fileno, events=mask,
                        data=work_id,
                    )
                    self.registered_events_by_work_ids[work_id][fileno] = mask
                    logger.debug(
                        'fd#{0} modified for mask#{1} by work#{2}'.format(
                            fileno, mask, work_id,
                        ),
                    )
                # else:
                #     logger.info(
                #         'fd#{0} by work#{1} not modified'.format(fileno, work_id))
            elif fileno in self._upstream_conn_filenos:
                # Descriptor offered by work, but is already registered by connection pool
                # Most likely because work has acquired a reusable connection.
                self.selector.modify(fileno, events=mask, data=work_id)
                self.registered_events_by_work_ids[work_id][fileno] = mask
                self._upstream_conn_filenos.remove(fileno)
                logger.debug(
                    'fd#{0} borrowed with mask#{1} by work#{2}'.format(
                        fileno, mask, work_id,
                    ),
                )
            # Can throw ValueError: Invalid file descriptor: -1
            #
            # A guard within Work classes may not help here due to
            # asynchronous nature. Hence, threadless will handle
            # ValueError exceptions raised by selector.register
            # for invalid fd.
            #
            # TODO: Also remove offending work from pool to avoid spin loop.
            elif fileno != -1:
                self.selector.register(fileno, events=mask, data=work_id)
                self.registered_events_by_work_ids[work_id][fileno] = mask
                logger.debug(
                    'fd#{0} registered for mask#{1} by work#{2}'.format(
                        fileno, mask, work_id,
                    ),
                )
async def _update_conn_pool_events(self) -> None:
if not self._upstream_conn_pool:
return
assert self.selector is not None
new_conn_pool_events = await self._upstream_conn_pool.get_events()
old_conn_pool_filenos = self._upstream_conn_filenos.copy()
self._upstream_conn_filenos.clear()
new_conn_pool_filenos = set(new_conn_pool_events.keys())
new_conn_pool_filenos.difference_update(old_conn_pool_filenos)
for fileno in new_conn_pool_filenos:
self.selector.register(
fileno,
events=new_conn_pool_events[fileno],
data=0,
)
self._upstream_conn_filenos.add(fileno)
old_conn_pool_filenos.difference_update(self._upstream_conn_filenos)
for fileno in old_conn_pool_filenos:
self.selector.unregister(fileno)
async def _update_selector(self) -> None:
assert self.selector is not None
unfinished_work_ids = set()
for task in self.unfinished:
unfinished_work_ids.add(task._work_id) # type: ignore
for work_id in self.works:
# We don't want to invoke work objects which haven't
# yet finished their previous task
if work_id in unfinished_work_ids:
continue
await self._update_work_events(work_id)
await self._update_conn_pool_events()
async def _selected_events(self) -> Tuple[
Dict[int, Tuple[Readables, Writables]],
bool,
]:
"""For each work, collects events that they are interested in.
Calls select for events of interest.
Returns a 2-tuple containing a dictionary and boolean.
Dictionary keys are work IDs and values are 2-tuple
containing ready readables & writables.
Returned boolean value indicates whether there is
a newly accepted work waiting to be received and
queued for processing. This is only applicable when
:class:`~proxy.core.work.threadless.Threadless.work_queue_fileno`
returns a valid fd.
"""
assert self.selector is not None
await self._update_selector()
# Keys are work_id and values are 2-tuple indicating
# readables & writables that work_id is interested in
# and are ready for IO.
work_by_ids: Dict[int, Tuple[Readables, Writables]] = {}
new_work_available = False
wqfileno = self.work_queue_fileno()
if wqfileno is None:
# When ``work_queue_fileno`` returns None,
# always return True for the boolean value.
new_work_available = True
events = self.selector.select(
timeout=DEFAULT_SELECTOR_SELECT_TIMEOUT,
)
for key, mask in events:
if not new_work_available and wqfileno is not None and key.fileobj == wqfileno:
assert mask & selectors.EVENT_READ
new_work_available = True
continue
if key.data not in work_by_ids:
work_by_ids[key.data] = ([], [])
if mask & selectors.EVENT_READ:
work_by_ids[key.data][0].append(key.fd)
if mask & selectors.EVENT_WRITE:
work_by_ids[key.data][1].append(key.fd)
return (work_by_ids, new_work_available)
async def _wait_for_tasks(self) -> Set['asyncio.Task[bool]']:
finished, self.unfinished = await asyncio.wait(
self.unfinished,
timeout=self.wait_timeout,
return_when=asyncio.FIRST_COMPLETED,
)
return finished # noqa: WPS331
def _cleanup_inactive(self) -> None:
inactive_works: List[int] = []
for work_id in self.works:
if self.works[work_id].is_inactive():
inactive_works.append(work_id)
for work_id in inactive_works:
self._cleanup(work_id)
    # TODO: HttpProtocolHandler.shutdown can call flush which may block
    def _cleanup(self, work_id: int) -> None:
        """Unregister ``work_id``'s descriptors, shut the work down and
        drop it from the registry.

        NOTE(review): when ``work_queue_fileno`` returns a valid fd,
        ``work_id`` itself is closed as an OS-level fd below -- i.e. work
        ids appear to double as client connection fds in that mode.
        Confirm against the accept/receive path.
        """
        if work_id in self.registered_events_by_work_ids:
            assert self.selector
            for fileno in self.registered_events_by_work_ids[work_id]:
                logger.debug(
                    'fd#{0} unregistered by work#{1}'.format(
                        fileno, work_id,
                    ),
                )
                self.selector.unregister(fileno)
            self.registered_events_by_work_ids[work_id].clear()
            del self.registered_events_by_work_ids[work_id]
        self.works[work_id].shutdown()
        del self.works[work_id]
        if self.work_queue_fileno() is not None:
            os.close(work_id)
def _create_tasks(
self,
work_by_ids: Dict[int, Tuple[Readables, Writables]],
) -> Set['asyncio.Task[bool]']:
assert self.loop
tasks: Set['asyncio.Task[bool]'] = set()
for work_id in work_by_ids:
if work_id == 0:
assert self._upstream_conn_pool
task = self.loop.create_task(
self._upstream_conn_pool.handle_events(
*work_by_ids[work_id],
),
)
else:
task = self.loop.create_task(
self.works[work_id].handle_events(*work_by_ids[work_id]),
)
task._work_id = work_id # type: ignore[attr-defined]
# task.set_name(work_id)
tasks.add(task)
return tasks
    async def _run_once(self) -> bool:
        """Run a single select / dispatch / reap cycle.

        Returns True when a teardown signal was received over the work
        queue, i.e. the event loop must exit.
        """
        assert self.loop is not None
        work_by_ids, new_work_available = await self._selected_events()
        # Accept new work if available
        #
        # TODO: We must use a work klass to handle
        # client_queue fd itself a.k.a. accept_client
        # will become handle_readables.
        if new_work_available:
            teardown = self.receive_from_work_queue()
            if teardown:
                return teardown
        if len(work_by_ids) == 0:
            return False
        # Invoke Threadless.handle_events
        self.unfinished.update(self._create_tasks(work_by_ids))
        # logger.debug('Executing {0} works'.format(len(self.unfinished)))
        # Cleanup finished tasks
        for task in await self._wait_for_tasks():
            # Checking for result can raise exception e.g.
            # CancelledError, InvalidStateError or an exception
            # from underlying task e.g. TimeoutError.
            teardown = False
            work_id = task._work_id  # type: ignore
            try:
                teardown = task.result()
            finally:
                # No except clause: an exception raised by result()
                # propagates to the caller after this check runs (teardown
                # stays False, so no cleanup happens for that work).
                if teardown:
                    self._cleanup(work_id)
            # self.cleanup(int(task.get_name()))
        # logger.debug(
        #     'Done executing works, {0} pending, {1} registered'.format(
        #         len(self.unfinished), len(self.registered_events_by_work_ids),
        #     ),
        # )
        return False
    async def _run_forever(self) -> None:
        """Drive :meth:`_run_once` until teardown or shutdown is signalled."""
        tick = 0
        try:
            while True:
                if await self._run_once():
                    break
                # Check for inactive and shutdown signal
                #
                # ``elapsed`` is an estimate: ticks times the worst-case
                # duration of one cycle (select timeout + task wait timeout).
                # Both checks below run only on this cleanup cadence.
                # NOTE(review): the loop exits when the ``running`` event is
                # *set* -- verify the flag's semantics (the name suggests
                # the opposite).
                elapsed = tick * \
                    (DEFAULT_SELECTOR_SELECT_TIMEOUT + self.wait_timeout)
                if elapsed >= self.cleanup_inactive_timeout:
                    self._cleanup_inactive()
                    if self.running.is_set():
                        break
                    tick = 0
                tick += 1
        except KeyboardInterrupt:
            pass
        finally:
            if self.loop:
                self.loop.stop()
    def run(self) -> None:
        """Entry point: configure logging and the selector, then spin the
        asyncio loop until interrupted, tearing everything down at exit."""
        Logger.setup(
            self.flags.log_file, self.flags.log_level,
            self.flags.log_format,
        )
        wqfileno = self.work_queue_fileno()
        try:
            self.selector = selectors.DefaultSelector()
            if wqfileno is not None:
                # Watch the work-queue fd for newly accepted work.
                self.selector.register(
                    wqfileno,
                    selectors.EVENT_READ,
                    data=wqfileno,
                )
            assert self.loop
            logger.debug('Working on {0} works'.format(len(self.works)))
            self.loop.create_task(self._run_forever())
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            assert self.selector is not None
            if wqfileno is not None:
                self.selector.unregister(wqfileno)
                self.close_work_queue()
            self.selector.close()
            assert self.loop is not None
            # Drain async generators before closing the loop.
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            self.loop.close()
| {
"content_hash": "af2622b8e77cfc167a3395e9d319081d",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 93,
"avg_line_length": 39.242924528301884,
"alnum_prop": 0.5702265761163532,
"repo_name": "abhinavsingh/proxy.py",
"id": "f43c0a47329da90f0b54ce3c9ab9bec871ea351e",
"size": "16669",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "proxy/core/work/threadless.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "891"
},
{
"name": "Dockerfile",
"bytes": "1222"
},
{
"name": "HTML",
"bytes": "3454"
},
{
"name": "JavaScript",
"bytes": "2260"
},
{
"name": "Jupyter Notebook",
"bytes": "29773"
},
{
"name": "Makefile",
"bytes": "6399"
},
{
"name": "Procfile",
"bytes": "387"
},
{
"name": "Python",
"bytes": "680280"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "19211"
},
{
"name": "TypeScript",
"bytes": "23642"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add log/login service M2M links and enforce host+role uniqueness."""

    dependencies = [
        ('servicemap', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='service',
            name='log_services',
            field=models.ManyToManyField(related_name='log_service', to='servicemap.Service'),
        ),
        migrations.AddField(
            model_name='service',
            name='login_systems',
            field=models.ManyToManyField(related_name='login_service', to='servicemap.Service'),
        ),
        migrations.AlterUniqueTogether(
            name='hostrole',
            # Set literal instead of set([...]) -- same value, idiomatic.
            unique_together={('host', 'role')},
        ),
    ]
| {
"content_hash": "9e7dd2b40aaa62a3a6c854268297a2b5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 96,
"avg_line_length": 28.037037037037038,
"alnum_prop": 0.5812417437252312,
"repo_name": "vegitron/servicemap",
"id": "c4735d01b594fd61624b1bcb88feff5ce060d95d",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servicemap/migrations/0002_auto_20150820_0930.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1771"
},
{
"name": "HTML",
"bytes": "7758"
},
{
"name": "Python",
"bytes": "43052"
}
],
"symlink_target": ""
} |
"""Tests for the runtime support utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from dragnn.protos import export_pb2
from dragnn.protos import spec_pb2
from dragnn.python import network_units
from dragnn.python import runtime_support
class MockNetwork(object):
  """Mock network exposing ``params`` and ``derived_params`` for tests."""
  def __init__(self):
    # Only rank-2 params are expected to receive matrix hooks; the rank-3
    # param lets tests assert that no hooks are added for it.
    self.params = [
        tf.get_variable('rank2', [64, 127], tf.float32),
        tf.get_variable('rank3', [64, 127, 250], tf.float32)
    ]
    self.derived_params = [
        self._fake_derived_vector, self._fake_derived_parameter
    ]
  def _fake_derived_vector(self):
    """Returns a derived vector named outside any variable scope."""
    value = tf.constant([1, 2, 3], dtype=tf.float32)
    with tf.name_scope(None):
      return tf.identity(value, name='derived/vector')
  def _fake_derived_parameter(self):
    """Returns a derived parameter named alongside ``params[0]``."""
    # Use absolute scoping to put the derived parameter in the same namespace.
    base_name = self.params[0].op.name.rsplit('/', 1)[0]
    with tf.name_scope(None):
      return tf.concat(
          [self.params[0], self.params[0]],
          axis=0,
          name='{}/derived'.format(base_name))
class MockComponent(object):
  """Mock component wiring a ComponentSpec and a MockNetwork together."""
  def __init__(self):
    self.name = 'test_component'
    self.spec = spec_pb2.ComponentSpec()
    # Create the network's variables inside this component's scope.
    with tf.variable_scope(self.name):
      self.network = MockNetwork()
  def get_variable(self, var_name=None, var_params=None):
    """Returns the named variable from the current scope, or ``var_params``
    unchanged when no name is given."""
    if var_name:
      return tf.get_variable(var_name)
    else:
      return var_params
class RuntimeSupportTest(tf.test.TestCase):
  """Tests for runtime_support.add_hooks and its matrix helpers."""
  def testAddLinkedHooks(self):
    """Hooks are added for transformed links only, not for direct links."""
    component = MockComponent()
    link0 = component.spec.linked_feature.add()
    link1 = component.spec.linked_feature.add()
    link0.embedding_dim = -1  # direct link
    link1.embedding_dim = 32  # transformed link
    link0_matrix_name = network_units.linked_embeddings_name(0)
    link1_matrix_name = network_units.linked_embeddings_name(1)
    with self.test_session() as session:
      graph = session.graph
      # Create linked embedding matrices. Only channel 1 uses one.
      with tf.variable_scope(component.name):
        tf.get_variable(link1_matrix_name, shape=[64 + 1, 32], dtype=tf.float32)
      # Add hooks. This should ignore channel 0 and add hooks for channel 1.
      with tf.variable_scope(component.name, reuse=True):
        runtime_support.add_hooks(component, export_pb2.CellSubgraphSpec())
      # Check that no hooks were added for channel 0.
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/weights:0'.format(component.name, link0_matrix_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name('{}/{}/weights/transposed:0'.format(
            component.name, link0_matrix_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name('{}/{}/weights/transposed/shape:0'.format(
            component.name, link0_matrix_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name('{}/{}/weights/transposed/blocked32:0'.format(
            component.name, link0_matrix_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name('{}/{}/weights/transposed/blocked48:0'.format(
            component.name, link0_matrix_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/out_of_bounds:0'.format(component.name, link0_matrix_name))
      # Get the hooks added for channel 1.
      weights = graph.get_tensor_by_name(
          '{}/{}/weights:0'.format(component.name, link1_matrix_name))
      transposed = graph.get_tensor_by_name('{}/{}/weights/transposed:0'.format(
          component.name, link1_matrix_name))
      transposed_shape = graph.get_tensor_by_name(
          '{}/{}/weights/transposed/shape:0'.format(component.name,
                                                    link1_matrix_name))
      transposed32 = graph.get_tensor_by_name(
          '{}/{}/weights/transposed/blocked32:0'.format(component.name,
                                                        link1_matrix_name))
      transposed48 = graph.get_tensor_by_name(
          '{}/{}/weights/transposed/blocked48:0'.format(component.name,
                                                        link1_matrix_name))
      out_of_bounds = graph.get_tensor_by_name(
          '{}/{}/out_of_bounds:0'.format(component.name, link1_matrix_name))
      # Check dimensions of the hooks.
      tf.global_variables_initializer().run()
      self.assertAllEqual(tf.shape(weights).eval(), [64, 32])
      self.assertAllEqual(tf.shape(transposed).eval(), [32, 64])
      self.assertAllEqual(transposed_shape.eval(), [32, 64])
      self.assertAllEqual(tf.shape(transposed32).eval(), [2, 32, 32])
      self.assertAllEqual(tf.shape(transposed48).eval(), [2, 32, 48])
      self.assertAllEqual(tf.shape(out_of_bounds).eval(), [1, 32])
  def testAddFixedHooks(self):
    """A trimmed-embedding hook is added for embedded fixed channels only."""
    component = MockComponent()
    fixed0 = component.spec.fixed_feature.add()
    fixed1 = component.spec.fixed_feature.add()
    fixed0.embedding_dim = -1
    fixed1.embedding_dim = 32
    fixed0.vocabulary_size = 100
    fixed1.vocabulary_size = 1000
    fixed0_matrix_name = network_units.fixed_embeddings_name(0)
    fixed1_matrix_name = network_units.fixed_embeddings_name(1)
    with self.test_session() as session:
      graph = session.graph
      # Create fixed embedding matrices. Only channel 1 uses one.
      with tf.variable_scope(component.name):
        tf.get_variable(
            fixed1_matrix_name, shape=[1000 + 1, 32], dtype=tf.float32)
      # Add hooks. This should ignore channel 0 and add hooks for channel 1.
      with tf.variable_scope(component.name, reuse=True):
        runtime_support.add_hooks(component, export_pb2.CellSubgraphSpec())
      # Check that no hooks were added for channel 0.
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/trimmed:0'.format(component.name, fixed0_matrix_name))
      # Get the hooks added for channel 1.
      trimmed = graph.get_tensor_by_name(
          '{}/{}/trimmed:0'.format(component.name, fixed1_matrix_name))
      # Check dimensions of the hooks.
      tf.global_variables_initializer().run()
      self.assertAllEqual(tf.shape(trimmed).eval(), [1000, 32])
  def testAddParamsHooks(self):
    """Matrix/transposed/blocked/shape hooks are added for rank-2 params."""
    component = MockComponent()
    rank2_name = 'rank2'
    rank3_name = 'rank3'
    with self.test_session() as session:
      graph = session.graph
      # Add hooks. This should add hooks for all rank-2 params.
      with tf.variable_scope(component.name, reuse=True):
        runtime_support.add_hooks(component, export_pb2.CellSubgraphSpec())
      # Check that no hooks were added for the rank-3 params.
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/matrix:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/transposed:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/matrix/blocked32:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/matrix/blocked48:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/transposed/blocked32:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/transposed/blocked48:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/matrix/shape:0'.format(component.name, rank3_name))
      with self.assertRaises(KeyError):
        graph.get_tensor_by_name(
            '{}/{}/transposed/shape:0'.format(component.name, rank3_name))
      # Get the hooks added for each variable.
      matrix = graph.get_tensor_by_name(
          '{}/{}/matrix:0'.format(component.name, rank2_name))
      transposed = graph.get_tensor_by_name(
          '{}/{}/transposed:0'.format(component.name, rank2_name))
      matrix32 = graph.get_tensor_by_name(
          '{}/{}/matrix/blocked32:0'.format(component.name, rank2_name))
      matrix48 = graph.get_tensor_by_name(
          '{}/{}/matrix/blocked48:0'.format(component.name, rank2_name))
      transposed32 = graph.get_tensor_by_name(
          '{}/{}/transposed/blocked32:0'.format(component.name, rank2_name))
      transposed48 = graph.get_tensor_by_name(
          '{}/{}/transposed/blocked48:0'.format(component.name, rank2_name))
      matrix_shape = graph.get_tensor_by_name(
          '{}/{}/matrix/shape:0'.format(component.name, rank2_name))
      transposed_shape = graph.get_tensor_by_name(
          '{}/{}/transposed/shape:0'.format(component.name, rank2_name))
      # Check dimensions of the hooks.
      tf.global_variables_initializer().run()
      self.assertAllEqual(tf.shape(matrix).eval(), [64, 127])
      self.assertAllEqual(tf.shape(transposed).eval(), [127, 64])
      self.assertAllEqual(matrix_shape.eval(), [64, 127])
      self.assertAllEqual(transposed_shape.eval(), [127, 64])
      self.assertAllEqual(tf.shape(matrix32).eval(), [4, 64, 32])
      self.assertAllEqual(tf.shape(matrix48).eval(), [3, 64, 48])
      self.assertAllEqual(tf.shape(transposed32).eval(), [2, 127, 32])
      self.assertAllEqual(tf.shape(transposed48).eval(), [2, 127, 48])
  def testAddDerivedParamHooks(self):
    """Derived params get hooks too, including a bfloat16 blocked variant."""
    component = MockComponent()
    derived_name = 'derived'
    with self.test_session() as session:
      graph = session.graph
      # Add hooks.
      with tf.variable_scope(component.name, reuse=True):
        runtime_support.add_hooks(component, export_pb2.CellSubgraphSpec())
      session.run(tf.global_variables_initializer())
      # Get hooks for the derived vector.
      vector = graph.get_tensor_by_name('derived/vector:0')
      self.assertEqual(vector.shape, (3,))
      # Get the hooks for the derived variable.
      matrix = graph.get_tensor_by_name(
          '{}/{}/matrix/blocked32:0'.format(component.name, derived_name))
      self.assertAllEqual(tf.shape(matrix).eval(), [4, 128, 32])
      # Check the bfloat16 version. It should have the same shape.
      bfloat16_matrix = graph.get_tensor_by_name(
          '{}/{}/matrix/blocked32/bfloat16:0'.format(component.name,
                                                     derived_name))
      self.assertAllEqual(tf.shape(bfloat16_matrix).eval(), [4, 128, 32])
  def testMakePaddedBlockedMatrix(self):
    """Blocking pads the final column block with zeros."""
    with self.test_session():
      matrix = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20]]
      expected_blocked = [[[1, 2], [6, 7], [11, 12],
                           [16, 17]], [[3, 4], [8, 9], [13, 14], [18, 19]],
                          [[5, 0], [10, 0], [15, 0], [20, 0]]]
      matrix = tf.constant(matrix, tf.float32)
      actual_blocked = runtime_support.make_padded_blocked_matrix(matrix, 2)
      self.assertAllEqual(actual_blocked.eval(), expected_blocked)
  def testBfloat16Permutation(self):
    """Permutation swaps the middle two groups of four within each 16."""
    with self.test_session():
      matrix = [list(range(16))]
      expected_permuted = [[
          0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
      ]]
      matrix = tf.constant(matrix, tf.float32)
      actual_permuted = runtime_support.bfloat16_permutation(matrix)
      self.assertAllEqual(actual_permuted.eval(), expected_permuted)
  def testLargerBfloat16Permutation(self):
    """Spot-checks the permutation on a larger random 3-D input."""
    with self.test_session() as session:
      matrix = tf.random_uniform((3, 4, 32))
      permuted = runtime_support.bfloat16_permutation(matrix)
      matrix, actual_permuted = session.run([matrix, permuted])
      # Just check a few items for now, hopefully that's sufficient to ensure
      # the permutation is okay.
      self.assertEqual(matrix[0, 0, 0], actual_permuted[0, 0, 0])
      self.assertEqual(matrix[0, 0, 1], actual_permuted[0, 0, 1])
      self.assertEqual(matrix[1, 1, 16], actual_permuted[1, 1, 16])
      self.assertEqual(matrix[2, 0, 4], actual_permuted[2, 0, 8])
      self.assertEqual(matrix[2, 0, 5], actual_permuted[2, 0, 9])
      self.assertEqual(matrix[2, 1, 8], actual_permuted[2, 1, 4])
      self.assertEqual(matrix[2, 1, 8 + 16], actual_permuted[2, 1, 4 + 16])
  def testAddCellSubgraphSpecHook(self):
    """The CellSubgraphSpec is exported verbatim as a wire-format hook."""
    component = MockComponent()
    cell = export_pb2.CellSubgraphSpec()
    cell.input.add(
        name='feature',
        tensor='feature_tensor',
        type=export_pb2.CellSubgraphSpec.Input.TYPE_FEATURE)
    cell.input.add(
        name='recurrent',
        tensor='recurrent_tensor',
        type=export_pb2.CellSubgraphSpec.Input.TYPE_RECURRENT)
    cell.output.add(name='layer_0', tensor='layer_0_tensor')
    cell.output.add(name='logits', tensor='logits_tensor')
    with self.test_session() as session:
      graph = session.graph
      # Add hooks for the cell constructed above.
      with tf.variable_scope(component.name, reuse=True):
        runtime_support.add_hooks(component, cell)
      # Get the hook containing the wire-format proto.
      cell_wire_format = graph.get_tensor_by_name(
          '{}/EXPORT/CellSubgraphSpec:0'.format(component.name))
      # Check that the hook matches the cell.
      tf.global_variables_initializer().run()
      self.assertEqual(cell_wire_format.eval(), cell.SerializeToString())
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| {
"content_hash": "6020eb9eebbeff2622eb092c96a7bd2a",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 80,
"avg_line_length": 41.58715596330275,
"alnum_prop": 0.6348996249724245,
"repo_name": "derekjchow/models",
"id": "7488e33e1e369b100b7d50a5ae42e6241fd12ecd",
"size": "14276",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "research/syntaxnet/dragnn/python/runtime_support_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
} |
"""Translates Rackspace monitoring entities to labels in graphite_api"""
from graphite_api_rackspace import metadata

# Re-export package metadata under the conventional dunder attributes.
__version__ = metadata.version
__author__ = metadata.authors[0]  # first listed author
__license__ = metadata.license
__copyright__ = metadata.copyright
| {
"content_hash": "7ddb870a1298e01d79a33464f19e9cf0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.752,
"repo_name": "JustinHop/graphite_api_rackspace",
"id": "8f873baa53e5881e8cbe618e1a7cc3095e7f5b46",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphite_api_rackspace/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5706"
},
{
"name": "Python",
"bytes": "28604"
},
{
"name": "Shell",
"bytes": "5189"
}
],
"symlink_target": ""
} |
"""
Django settings for openlc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'mg4y_z$!eik+^(j+3tqo$7j+qai1v5-chrn10jh6y@a4v(%u5@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Empty list is only valid while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'notebooks',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'openlc.urls'
WSGI_APPLICATION = 'openlc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_FINDERS = ("django.contrib.staticfiles.finders.FileSystemFinder",
                       "django.contrib.staticfiles.finders.AppDirectoriesFinder")
# Templates are also looked up under media/ in addition to templates/.
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'media')]
# Extend the default context processors with the app-specific one.
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
    'notebooks.context_processor_functions.get_categories',
)
"content_hash": "3972fb4c9bdb3be50d88639d5cbfbc70",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 86,
"avg_line_length": 25.649484536082475,
"alnum_prop": 0.7226688102893891,
"repo_name": "Astrogel/test",
"id": "07f359c3da7d4f8b487ef4ec0ae025b6780815d0",
"size": "2488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openlc/openlc/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3638"
},
{
"name": "JavaScript",
"bytes": "355"
},
{
"name": "Python",
"bytes": "14997"
}
],
"symlink_target": ""
} |
""" Create the implicit zero concentrations in a model
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Author: Jonathan Karr <jonrkarr@gmail.com>
:Date: 2018-11-28
:Copyright: 2017-2018, Karr Lab
:License: MIT
"""
from .core import Transform
from wc_lang.core import DistributionInitConcentration
from wc_utils.util.units import unit_registry
class CreateImplicitDistributionZeroInitConcentrationsTransform(Transform):
    """ Create the implicit zero concentrations in a model
    """

    class Meta(object):
        id = 'CreateImplicitDistributionZeroInitConcentrations'
        label = 'Create the implicit zero concentrations in a model'

    def run(self, model):
        """ Transform model

        Args:
            model (:obj:`Model`): model

        Returns:
            :obj:`Model`: same model, but transformed
        """
        # Each species lacking an explicit initial concentration gets an
        # implicit distribution with mean 0 and std 0, in units of M.
        for species in model.get_species():
            if species.distribution_init_concentration is not None:
                continue
            species.distribution_init_concentration = DistributionInitConcentration(
                model=model,
                species=species,
                mean=0.0, std=0.0, units=unit_registry.parse_units('M'),
            )
            conc = species.distribution_init_concentration
            conc.id = conc.gen_id()
        return model
| {
"content_hash": "5471bd630d06f35511fe2401d25e9364",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 109,
"avg_line_length": 33.075,
"alnum_prop": 0.6643990929705216,
"repo_name": "KarrLab/obj_model",
"id": "e604c08525e2f27b6e9c0e25dd14e42659bd9b08",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fixtures/migrate/wc_lang_fixture/wc_lang/transform/create_implicit_dist_zero_init_concs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2360"
},
{
"name": "HTML",
"bytes": "44150"
},
{
"name": "JavaScript",
"bytes": "7739"
},
{
"name": "Python",
"bytes": "1487837"
}
],
"symlink_target": ""
} |
import os
import pytest
import pip.baseparser
from pip import main
from pip import cmdoptions
from pip.basecommand import Command
from pip.commands import commands
class FakeCommand(Command):
    """Minimal pip command used to exercise option parsing in tests."""
    name = 'fake'
    summary = name

    def main(self, args):
        """Attach the shared index options and parse ``args``."""
        group = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.add_option_group(group)
        return self.parse_args(args)
class TestOptionPrecedence(object):
    """
    Tests for confirming our option precedence:
    cli -> environment -> subcommand config -> global config -> option
    defaults
    """
    def setup(self):
        # Snapshot the environment and register the fake subcommand so each
        # test starts from a clean state.
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        # NOTE(review): rebinding os.environ restores the mapping object but
        # not necessarily the underlying process environment -- confirm this
        # is acceptable for these tests.
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def get_config_section(self, section):
        """Config stub where both [global] and [fake] define timeout."""
        config = {
            'global': [('timeout', '-3')],
            'fake': [('timeout', '-2')],
        }
        return config[section]
    def get_config_section_global(self, section):
        """Config stub where only [global] defines timeout."""
        config = {
            'global': [('timeout', '-3')],
            'fake': [],
        }
        return config[section]
    def test_env_override_default_int(self):
        """
        Test that environment variable overrides an int option default.
        """
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake'])
        assert options.timeout == -1
    def test_env_override_default_append(self):
        """
        Test that environment variable overrides an append option default.
        """
        os.environ['PIP_FIND_LINKS'] = 'F1'
        options, args = main(['fake'])
        assert options.find_links == ['F1']
        os.environ['PIP_FIND_LINKS'] = 'F1 F2'
        options, args = main(['fake'])
        assert options.find_links == ['F1', 'F2']
    def test_env_override_default_choice(self):
        """
        Test that environment variable overrides a choice option default.
        """
        os.environ['PIP_EXISTS_ACTION'] = 'w'
        options, args = main(['fake'])
        assert options.exists_action == ['w']
        os.environ['PIP_EXISTS_ACTION'] = 's w'
        options, args = main(['fake'])
        assert options.exists_action == ['s', 'w']
    def test_env_alias_override_default(self):
        """
        When an option has multiple long forms, test that the technique of
        using the env variable, "PIP_<long form>" works for all cases.
        (e.g. PIP_LOG_FILE and PIP_LOCAL_LOG should all work)
        """
        os.environ['PIP_LOG_FILE'] = 'override.log'
        options, args = main(['fake'])
        assert options.log_file == 'override.log'
        os.environ['PIP_LOCAL_LOG'] = 'override.log'
        options, args = main(['fake'])
        assert options.log_file == 'override.log'
    def test_cli_override_environment(self):
        """
        Test the cli overrides and environment variable
        """
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake', '--timeout', '-2'])
        assert options.timeout == -2
    def test_environment_override_config(self, monkeypatch):
        """
        Test an environment variable overrides the config file
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section,
        )
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake'])
        assert options.timeout == -1
    def test_commmand_config_override_global_config(self, monkeypatch):
        """
        Test that command config overrides global config
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section,
        )
        options, args = main(['fake'])
        assert options.timeout == -2
    def test_global_config_is_used(self, monkeypatch):
        """
        Test that global config is used
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section_global,
        )
        options, args = main(['fake'])
        assert options.timeout == -3
class TestOptionsInterspersed(object):
    """General options may be interspersed before/after the subcommand."""
    def setup(self):
        # Snapshot the environment and register the fake subcommand.
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def test_general_option_after_subcommand(self):
        """A general option parses when placed after the subcommand."""
        options, args = main(['fake', '--timeout', '-1'])
        assert options.timeout == -1
    def test_option_after_subcommand_arg(self):
        """A general option parses even after a positional argument."""
        options, args = main(['fake', 'arg', '--timeout', '-1'])
        assert options.timeout == -1
    def test_additive_before_after_subcommand(self):
        """Additive options accumulate across both positions."""
        options, args = main(['-v', 'fake', '-v'])
        assert options.verbose == 2
    def test_subcommand_option_before_subcommand_fails(self):
        """Subcommand-specific options are rejected before the subcommand."""
        with pytest.raises(SystemExit):
            main(['--find-links', 'F1', 'fake'])
class TestGeneralOptions(object):
# the reason to specifically test general options is due to the
# extra processing they receive, and the number of bugs we've had
    def setup(self):
        """Snapshot the environment and register the fake subcommand."""
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        """Restore the environment snapshot and deregister the subcommand."""
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def test_require_virtualenv(self):
        """--require-virtualenv works before and after the subcommand."""
        options1, args1 = main(['--require-virtualenv', 'fake'])
        options2, args2 = main(['fake', '--require-virtualenv'])
        assert options1.require_venv
        assert options2.require_venv
    def test_verbose(self):
        """--verbose works before and after the subcommand."""
        options1, args1 = main(['--verbose', 'fake'])
        options2, args2 = main(['fake', '--verbose'])
        assert options1.verbose == options2.verbose == 1
    def test_quiet(self):
        """--quiet works before and after the subcommand."""
        options1, args1 = main(['--quiet', 'fake'])
        options2, args2 = main(['fake', '--quiet'])
        assert options1.quiet == options2.quiet == 1
    def test_log(self):
        """--log works before and after the subcommand."""
        options1, args1 = main(['--log', 'path', 'fake'])
        options2, args2 = main(['fake', '--log', 'path'])
        assert options1.log == options2.log == 'path'
def test_log_explicit_levels(self):
options1, args1 = main(['--log-explicit-levels', 'fake'])
options2, args2 = main(['fake', '--log-explicit-levels'])
assert options1.log_explicit_levels
assert options2.log_explicit_levels
def test_local_log(self):
options1, args1 = main(['--local-log', 'path', 'fake'])
options2, args2 = main(['fake', '--local-log', 'path'])
assert options1.log_file == options2.log_file == 'path'
def test_no_input(self):
options1, args1 = main(['--no-input', 'fake'])
options2, args2 = main(['fake', '--no-input'])
assert options1.no_input
assert options2.no_input
def test_proxy(self):
options1, args1 = main(['--proxy', 'path', 'fake'])
options2, args2 = main(['fake', '--proxy', 'path'])
assert options1.proxy == options2.proxy == 'path'
def test_retries(self):
options1, args1 = main(['--retries', '-1', 'fake'])
options2, args2 = main(['fake', '--retries', '-1'])
assert options1.retries == options2.retries == -1
def test_timeout(self):
options1, args1 = main(['--timeout', '-1', 'fake'])
options2, args2 = main(['fake', '--timeout', '-1'])
assert options1.timeout == options2.timeout == -1
def test_default_vcs(self):
options1, args1 = main(['--default-vcs', 'path', 'fake'])
options2, args2 = main(['fake', '--default-vcs', 'path'])
assert options1.default_vcs == options2.default_vcs == 'path'
def test_skip_requirements_regex(self):
options1, args1 = main(['--skip-requirements-regex', 'path', 'fake'])
options2, args2 = main(['fake', '--skip-requirements-regex', 'path'])
assert options1.skip_requirements_regex == 'path'
assert options2.skip_requirements_regex == 'path'
def test_exists_action(self):
options1, args1 = main(['--exists-action', 'w', 'fake'])
options2, args2 = main(['fake', '--exists-action', 'w'])
assert options1.exists_action == options2.exists_action == ['w']
def test_cert(self):
options1, args1 = main(['--cert', 'path', 'fake'])
options2, args2 = main(['fake', '--cert', 'path'])
assert options1.cert == options2.cert == 'path'
def test_client_cert(self):
options1, args1 = main(['--client-cert', 'path', 'fake'])
options2, args2 = main(['fake', '--client-cert', 'path'])
assert options1.client_cert == options2.client_cert == 'path'
def test_no_check_certificate(self):
options1, args1 = main(['--no-check-certificate', 'fake'])
options2, args2 = main(['fake', '--no-check-certificate'])
assert options1.no_check_certificate == options2.no_check_certificate
class TestOptionsConfigFiles(object):

    def test_venv_config_file_found(self, monkeypatch):
        # We only want a dummy object to call the get_config_files method
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            '__init__',
            lambda self: None,
        )

        # strict limit on the site_config_files list
        monkeypatch.setattr(pip.baseparser, 'site_config_files', ['/a/place'])

        # If we are running in a virtualenv and all files appear to exist,
        # we should see three config files (presumably site, user and venv
        # — the assertion below expects 3; the original comment said "two",
        # which contradicts the assert).
        monkeypatch.setattr(
            pip.baseparser,
            'running_under_virtualenv',
            lambda: True,
        )
        monkeypatch.setattr(os.path, 'exists', lambda filename: True)
        cp = pip.baseparser.ConfigOptionParser()
        assert len(cp.get_config_files()) == 3
| {
"content_hash": "bef35c34ba51002916ccaf42d1c7cf07",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 78,
"avg_line_length": 34.682758620689654,
"alnum_prop": 0.5873931199045536,
"repo_name": "1stvamp/pip",
"id": "3801aec5bd0ce765928b7d589bb283051155cbb7",
"size": "10058",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/test_options.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
# Re-export MonitoringHub so callers can write
# ``from parsl.monitoring import MonitoringHub``.
from parsl.monitoring.monitoring import MonitoringHub

__all__ = ['MonitoringHub']
| {
"content_hash": "2b2738280eadb23c40ab2bbbd7e43660",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 53,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7710843373493976,
"repo_name": "Parsl/parsl",
"id": "4f72c82e034601f32c6c0616c265444c7b8c64d7",
"size": "83",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/monitoring/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
} |
import os
import unicodedata
import shutil
import logging
import datetime
import re
from werkzeug.utils import secure_filename
from flask import current_app, render_template
from weasyprint import HTML, CSS
from geonature.utils.env import BACKEND_DIR
# get the root logger
log = logging.getLogger()
def remove_file(filepath, absolute_path=False):
    """Best-effort removal of a file.

    ``filepath`` is used as-is when ``absolute_path`` is true, otherwise it
    is resolved against the application's BASE_DIR.  Any failure (missing
    file, permission error, missing app context) is silently ignored.
    """
    try:
        if absolute_path:
            target = filepath
        else:
            target = os.path.join(current_app.config["BASE_DIR"], filepath)
        os.remove(target)
    except Exception:
        # Deliberate best-effort: callers do not care if the file is gone.
        pass
def rename_file(old_chemin, old_title, new_title):
    """Rename a media file on disk after its title changed.

    The stored relative path embeds a sanitised form of the title, so the
    new path is derived by substituting the old sanitised title with the
    new one.  Returns the new relative path.
    """
    base_dir = current_app.config["BASE_DIR"]
    new_chemin = old_chemin.replace(
        removeDisallowedFilenameChars(old_title),
        removeDisallowedFilenameChars(new_title),
    )
    os.rename(
        os.path.join(base_dir, old_chemin),
        os.path.join(base_dir, new_chemin),
    )
    return new_chemin
def upload_file(file, id_media, cd_ref, titre):
    """Save an uploaded media file under UPLOAD_FOLDER.

    The target name is ``<cd_ref>_<id_media>_<sanitised title>.<ext>``.
    NOTE(review): assumes ``file.filename`` contains a dot — an
    extension-less upload would raise IndexError; confirm with callers.
    Returns the path relative to BASE_DIR.
    """
    extension = file.filename.rsplit(".", 1)[1]
    filename = "{cd_ref}_{id_media}_{title}.{ext}".format(
        cd_ref=str(cd_ref),
        id_media=str(id_media),
        title=removeDisallowedFilenameChars(titre),
        ext=extension,
    )
    filepath = os.path.join(current_app.config["UPLOAD_FOLDER"], filename)
    file.save(os.path.join(current_app.config["BASE_DIR"], filepath))
    return filepath
def removeDisallowedFilenameChars(uncleanString):
    """Sanitise a string for safe use in a file name.

    Runs werkzeug's ``secure_filename`` (strips path separators and other
    dangerous characters), then NFKD-normalises the result so accented
    characters decompose, collapses runs of spaces into underscores and
    drops everything outside ``[0-9a-zA-Z_-]``.
    """
    cleanedString = secure_filename(uncleanString)
    # Bug fix: the previous code normalised *uncleanString* here, which
    # silently discarded the secure_filename() result above.
    cleanedString = unicodedata.normalize("NFKD", cleanedString)
    cleanedString = re.sub("[ ]+", "_", cleanedString)
    cleanedString = re.sub("[^0-9a-zA-Z_-]", "", cleanedString)
    return cleanedString
def delete_recursively(path_folder, period=1, excluded_files=None):
    """
    Delete the files and directories inside a directory which were
    created more than a certain period ago.

    Parameters:
        path_folder(string): path to the folder to clean up
        period(integer): in days; delete entries older than this period
        excluded_files(list<string>): file names that must never be deleted

    Errors on individual entries are logged and do not stop the scan.
    """
    # Default handled here to avoid the mutable-default-argument pitfall.
    if excluded_files is None:
        excluded_files = []
    # getctime() returns an epoch timestamp which utcfromtimestamp()
    # interprets in UTC, so the "now" side must be UTC too (the original
    # used local-time now(), skewing the age by the UTC offset).
    now = datetime.datetime.utcnow()
    for the_file in os.listdir(path_folder):
        file_path = os.path.join(path_folder, the_file)
        try:
            creation_date = datetime.datetime.utcfromtimestamp(
                os.path.getctime(file_path))
            is_older_than_period = (now - creation_date).days >= period
            if is_older_than_period:
                if os.path.isfile(file_path) and the_file not in excluded_files:
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    # Directories are removed wholesale, regardless of the
                    # exclusion list (unchanged original behaviour).
                    shutil.rmtree(file_path)
        except Exception as e:
            # Best-effort cleanup: log and keep scanning.
            log.error(e)
def generate_pdf(template, data, filename):
    """Render a template to a PDF file under static/pdf.

    Returns the absolute path of the generated file.
    """
    rendered = render_template(template, data=data)
    document = HTML(
        string=rendered,
        base_url=current_app.config['API_ENDPOINT'],
        encoding="utf-8",
    )
    target_path = "{}/static/pdf/{}".format(str(BACKEND_DIR), filename)
    document.write_pdf(target_path)
    return target_path
| {
"content_hash": "855abc3e8e2cc02c514deacdb7be4cdf",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 109,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.6611842105263158,
"repo_name": "PnEcrins/GeoNature",
"id": "6fbdb01818182cc8b620a0a12bce593b60cad34b",
"size": "3040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/geonature/utils/filemanager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1931"
},
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "CSS",
"bytes": "763718"
},
{
"name": "HTML",
"bytes": "651"
},
{
"name": "JavaScript",
"bytes": "16182773"
},
{
"name": "PHP",
"bytes": "4058658"
},
{
"name": "PLpgSQL",
"bytes": "893372"
},
{
"name": "Shell",
"bytes": "33147"
}
],
"symlink_target": ""
} |
"""Test createwallet arguments.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class CreateWalletWatchonlyTest(BitcoinTestFramework):
    """Check that include_watchonly defaults to true on watch-only wallets
    across the wallet RPCs exercised below."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        self.nodes[0].createwallet(wallet_name='default')
        def_wallet = node.get_wallet_rpc('default')

        a1 = def_wallet.getnewaddress()
        wo_change = def_wallet.getnewaddress()
        wo_addr = def_wallet.getnewaddress()

        # Watch-only wallet: private keys disabled, it only learns the
        # pubkeys of two addresses owned by the default wallet.
        self.nodes[0].createwallet(wallet_name='wo', disable_private_keys=True)
        wo_wallet = node.get_wallet_rpc('wo')

        wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_addr)['pubkey'])
        wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_change)['pubkey'])

        # generate some btc for testing
        node.generatetoaddress(101, a1)

        # send 1 btc to our watch-only address
        txid = def_wallet.sendtoaddress(wo_addr, 1)
        self.nodes[0].generate(1)

        # getbalance
        self.log.info('include_watchonly should default to true for watch-only wallets')
        self.log.info('Testing getbalance watch-only defaults')
        assert_equal(wo_wallet.getbalance(), 1)
        assert_equal(len(wo_wallet.listtransactions()), 1)
        # Explicitly excluding watch-only must hide the balance entirely.
        assert_equal(wo_wallet.getbalance(include_watchonly=False), 0)

        self.log.info('Testing listreceivedbyaddress watch-only defaults')
        result = wo_wallet.listreceivedbyaddress()
        assert_equal(len(result), 1)
        assert_equal(result[0]["involvesWatchonly"], True)
        result = wo_wallet.listreceivedbyaddress(include_watchonly=False)
        assert_equal(len(result), 0)

        self.log.info('Testing listreceivedbylabel watch-only defaults')
        result = wo_wallet.listreceivedbylabel()
        assert_equal(len(result), 1)
        assert_equal(result[0]["involvesWatchonly"], True)
        result = wo_wallet.listreceivedbylabel(include_watchonly=False)
        assert_equal(len(result), 0)

        self.log.info('Testing listtransactions watch-only defaults')
        result = wo_wallet.listtransactions()
        assert_equal(len(result), 1)
        assert_equal(result[0]["involvesWatchonly"], True)
        result = wo_wallet.listtransactions(include_watchonly=False)
        assert_equal(len(result), 0)

        self.log.info('Testing listsinceblock watch-only defaults')
        result = wo_wallet.listsinceblock()
        assert_equal(len(result["transactions"]), 1)
        assert_equal(result["transactions"][0]["involvesWatchonly"], True)
        result = wo_wallet.listsinceblock(include_watchonly=False)
        assert_equal(len(result["transactions"]), 0)

        self.log.info('Testing gettransaction watch-only defaults')
        result = wo_wallet.gettransaction(txid)
        assert_equal(result["details"][0]["involvesWatchonly"], True)
        result = wo_wallet.gettransaction(txid=txid, include_watchonly=False)
        assert_equal(len(result["details"]), 0)

        self.log.info('Testing walletcreatefundedpsbt watch-only defaults')
        inputs = []
        outputs = [{a1: 0.5}]
        options = {'changeAddress': wo_change}
        # Same options but with watch-only funds explicitly excluded —
        # funding must then fail with "Insufficient funds".
        no_wo_options = {'changeAddress': wo_change, 'includeWatching': False}
        result = wo_wallet.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, options=options)
        assert_equal("psbt" in result, True)
        assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.walletcreatefundedpsbt, inputs, outputs, 0, no_wo_options)

        self.log.info('Testing fundrawtransaction watch-only defaults')
        rawtx = wo_wallet.createrawtransaction(inputs=inputs, outputs=outputs)
        result = wo_wallet.fundrawtransaction(hexstring=rawtx, options=options)
        assert_equal("hex" in result, True)
        assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.fundrawtransaction, rawtx, no_wo_options)
# Standard functional-test entry point.
if __name__ == '__main__':
    CreateWalletWatchonlyTest().main()
| {
"content_hash": "b43d4c32007253f08edab9db57e60922",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 126,
"avg_line_length": 41.31683168316832,
"alnum_prop": 0.6743350107836089,
"repo_name": "pstratem/bitcoin",
"id": "24799fe5f20898e710860a1696dbdaf3964704aa",
"size": "4387",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/wallet_watchonly.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695537"
},
{
"name": "C++",
"bytes": "6406006"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198872"
},
{
"name": "Makefile",
"bytes": "121257"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1602858"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "97840"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sargasso
# Package metadata and distribution configuration for Sargasso.
setup(
    name="sargasso",
    version=sargasso.__version__,
    url='https://github.com/statbio/Sargasso',
    license='MIT License',
    author='Owen Dando',
    author_email='owen.dando@ed.ac.uk',
    packages=find_packages(),
    # NOTE(review): 'pytest' is a test-only tool; it would normally live in
    # a dev/test extra rather than install_requires — confirm before moving.
    install_requires=[
        'docopt',
        'pysam',
        'schema',
        'pytest',
    ],
    # Command-line entry points shipped with the package.
    scripts=[
        'bin/build_star_index',
        'bin/build_bowtie2_index',
        'bin/collate_raw_reads',
        'bin/filter_control',
        'bin/filter_reads',
        'bin/filter_sample_reads',
        'bin/map_reads_rnaseq',
        'bin/map_reads_dnaseq',
        'bin/sargasso_parameter_test',
        'bin/sort_reads',
        'bin/species_separator',
    ]
)
| {
"content_hash": "d0d43ad94d50096c476546ef6bc1d20d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 46,
"avg_line_length": 24.09375,
"alnum_prop": 0.569390402075227,
"repo_name": "statbio/Sargasso",
"id": "2710a99dfcbf3f85f53cce3626e9cc91957fc21d",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99148"
},
{
"name": "Shell",
"bytes": "24994"
}
],
"symlink_target": ""
} |
"""
Miscellaneous commands.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you\'re keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
| {
"content_hash": "dd27e0aad061c32bc944c7954881fa45",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 22.703703703703702,
"alnum_prop": 0.7275693311582382,
"repo_name": "ProgVal/Limnoria-test",
"id": "95dbd64b1481c95a939e3e7c21ab2cec7c601dab",
"size": "2202",
"binary": false,
"copies": "4",
"ref": "refs/heads/debug-pypy-sqlite",
"path": "plugins/Misc/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "2591313"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
"""
:Authors: - Iason
"""
import argparse
import sys
import logging
import math
from reader import load_grammar
from collections import defaultdict, Counter
from sentence import make_sentence
from slice_variable import SliceVariable
from sliced_earley import SlicedEarley
from sliced_nederhof import SlicedNederhof
from topsort import top_sort
from inference import inside
from generalisedSampling import GeneralisedSampling
from symbol import parse_annotated_nonterminal, make_nonterminal
import time
import re
from wcfg import WCFG
from earley import Earley
from nederhof import Nederhof
from nltk import Tree
def inlinetree(t):
    """Render a tree (or any object) as a single-line string.

    Newlines are removed and runs of spaces collapsed to one.
    """
    flat = str(t).replace('\n', '')
    return re.sub(' +', ' ', flat)
def make_nltk_tree(derivation):
    """
    Recursively constructs an nltk Tree from a list of rules.

    The LHS of the first rule in *derivation* is taken as the root; every
    RHS symbol that is itself the LHS of some rule is expanded recursively,
    all other symbols become leaves.
    """
    rules_by_lhs = defaultdict(None, ((r.lhs, r) for r in derivation))

    def build(symbol):
        rule = rules_by_lhs[symbol]
        children = (str(child) if child not in rules_by_lhs else build(child)
                    for child in rule.rhs)
        return Tree(str(rule.lhs), children)

    return build(derivation[0].lhs)
def get_conditions(d):
    """
    Update conditions: map each annotated state of the previous derivation
    to the log-probability of the rule that produced it.
    """
    conditions = {}
    for rule in d:
        conditions[parse_annotated_nonterminal(rule.lhs)] = rule.log_prob
    return conditions
def permutation_length(nonterminal):
    """
    :param nonterminal: a non-terminal of the form: '[P1234*2_1]'
    :return: the number of digits in the permutation marker, or 0 when the
        non-terminal carries no 'P<digits>' marker
    """
    # The original asserted `matches is not None` inside the branch that
    # had just tested exactly that — dead code, removed.
    match = re.search(r'P([0-9]+)', nonterminal)
    if match is None:
        return 0
    return len(match.group(1))
def initialise(wcfg, wfsa, root, goal, intersection):
    """
    Calculate a first derivation based on a simpler (thus smaller/faster) version of the grammar
    Thereby determining the initial conditions.
    Only applicable with the 'milos' grammar format, i.e. non-terminals have the form: '[P1234*2_1]'

    Returns a conditions dict (annotated state -> log-prob), or {} when the
    pruned grammar cannot parse the input.
    """
    smaller = WCFG([])
    logging.debug('Creating a smaller grammar for initial conditions...')
    # Keep only rules with short permutations (length 1-2) plus the start
    # symbol and unknown-word rules: a cheap approximation of the grammar.
    for line in wcfg:
        if 0 < permutation_length(line.lhs) <= 2:
            smaller.add(line)
        elif line.lhs == root or line.lhs == '[UNK]':
            smaller.add(line)

    if intersection == 'nederhof':
        init_parser = Nederhof(smaller, wfsa)
    elif intersection == 'earley':
        init_parser = Earley(smaller, wfsa)
    else:
        raise NotImplementedError('I do not know this algorithm: %s' % intersection)

    logging.debug('Init Parsing...')
    init_forest = init_parser.do(root, goal)

    if not init_forest:
        print 'NO PARSE FOUND'
        return {}
    else:
        logging.debug('Forest: rules=%d', len(init_forest))
        logging.debug('Init Topsorting...')
        # sort the forest
        sorted_nodes = top_sort(init_forest)

        # calculate the inside weight of the sorted forest
        logging.debug('Init Inside...')
        init_inside_prob = inside(init_forest, sorted_nodes)

        logging.debug('Init Sampling...')
        gen_sampling = GeneralisedSampling(init_forest, init_inside_prob)
        init_d = gen_sampling.sample(goal)

        # The sampled derivation's per-state log-probs become the initial
        # slice-sampling conditions.
        return get_conditions(init_d)
def sliced_sampling(wcfg, wfsa, root='[S]', goal='[GOAL]', n_samples=100, n_burn=100, max_iterations=1000, a=[0.1, 0.1],
                    b=[1.0, 1.0], intersection='nederhof', grammarfmt='milos'):
    """
    Sample N derivations in maximum K iterations with Slice Sampling

    a and b hold the (before, after) Beta parameter pairs used before and
    after the first derivation is found.
    NOTE(review): the list defaults are never mutated here, but mutable
    default arguments are a known pitfall — consider tuples.
    """
    if intersection == 'nederhof':
        logging.info('Using Nederhof parser')
        parser_type = SlicedNederhof
    elif intersection == 'earley':
        parser_type = SlicedEarley
        logging.info('Using Earley parser')
    else:
        raise NotImplementedError('I do not know this algorithm: %s' % intersection)

    samples = []

    # the initial conditions function is only implemented for the 'milos' grammarformat,
    # this could be extended to other grammar formats as well.
    if grammarfmt == 'milos':
        logging.debug('Calculating initial conditions...')
        # calculate the initial conditions (first derivation (i.e. seed))
        initial_conditions = initialise(wcfg, wfsa, root, goal, intersection)
        # begin with sampling with respect to the initial conditions
        slice_vars = SliceVariable(a=a[1], b=b[1], conditions=initial_conditions)
    else:
        slice_vars = SliceVariable(a=a[0], b=b[0])

    it = 0
    while len(samples) < n_samples and it < max_iterations:
        it += 1
        if it % 10 == 0:
            logging.info('it=%d samples=%d', it, len(samples))
        d = sliced_sample(root, goal, parser_type(wcfg, wfsa, slice_vars))
        if d is not None:
            if n_burn > 0:  # in case we are burning derivations, we do not add them to the list
                n_burn -= 1  # but we still use them to update the slice variables
            else:
                samples.append(d)
            # because we have a derivation
            # we reset the assignments of the slice variables
            # we fix new conditions
            # and we move on to the second pair of parameters of the beta
            conditions = get_conditions(d)
            slice_vars.reset(conditions, a[1], b[1])
        else:
            # because we do not have a derivation
            # but we are indeed finishing one iteration
            # we reset the assignments of the slice variables
            # however we leave the conditions unchanged
            # similarly, we do not change the parameters of the beta
            slice_vars.reset()

    # Report each distinct derivation with its empirical estimate and score.
    counts = Counter(tuple(d) for d in samples)
    for d, n in counts.most_common():
        score = sum(r.log_prob for r in d)
        print '# n=%s estimate=%s score=%s' % (n, float(n)/len(samples), score)
        tree = make_nltk_tree(d)
        inline_tree = inlinetree(tree)
        print inline_tree, "\n"
def edge_uniform_weight(edge, goal, slicevars):
    """
    Return a uniform view of the edge's log-probability.

    :param edge: an edge
    :param goal: the goal node (special-cased: it has no slice variables)
    :param slicevars: a SliceVariable object
    :returns: 1/beta.pdf(u_s; a, b)
    """
    # Rules rooted by the goal symbol have probability 1 (log 0.0) and no
    # associated slice variable.
    if edge.lhs == goal:
        return 0.0
    sym, start, end = parse_annotated_nonterminal(edge.lhs)
    return slicevars.weight(sym, start, end, edge.log_prob)
def sliced_sample(root, goal, parser):
    """
    Sample one derivation from the intersection of a wcfg and a wfsa with
    Slice Sampling, a form of MCMC-sampling.  Returns None when the sliced
    grammar does not parse.
    """
    logging.debug('Parsing...')
    forest = parser.do(root, goal)
    if not forest:
        logging.debug('NO PARSE FOUND')
        return None

    logging.debug('Forest: rules=%d', len(forest))

    logging.debug('Topsorting...')
    sorted_nodes = top_sort(forest)

    # Inside weights are computed under a *uniform* edge-weight view driven
    # by the slice variables rather than the true rule probabilities.
    logging.debug('Inside...')
    uniform = lambda edge: edge_uniform_weight(edge, goal, parser.slice_vars)
    inside_prob = inside(forest, sorted_nodes, omega=uniform)

    # Draw a random derivation w.r.t. the inside weight distribution,
    # again under the uniform edge view.
    logging.debug('Sampling...')
    sampler = GeneralisedSampling(forest, inside_prob, omega=uniform)
    return sampler.sample(goal)
def main(args):
    """Entry point: run core(), optionally under cProfile."""
    if not args.profile:
        core(args)
        return
    import cProfile
    profiler = cProfile.Profile()
    profiler.enable()
    core(args)
    profiler.disable()
    # Stats are written to the path given via --profile.
    profiler.dump_stats(args.profile)
def core(args):
    """Load the grammar, then slice-sample every sentence on the input."""
    # Configure logging verbosity before anything else runs.
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s %(message)s')

    logging.info('Loading grammar...')
    # Rule weights are read either into log-space or as plain floats.
    if args.log:
        wcfg = load_grammar(args.grammar, args.grammarfmt, transform=math.log)
    else:
        wcfg = load_grammar(args.grammar, args.grammarfmt, transform=float)
    logging.info(' %d rules', len(wcfg))

    jobs = [input_str.strip() for input_str in args.input]

    for jid, input_str in enumerate(jobs, 1):
        # make_sentence may produce extra pass-through rules for unknown
        # words; they are merged into the grammar before parsing.
        sentence, extra_rules = make_sentence(input_str, wcfg.terminals, args.unkmodel, args.default_symbol, split_bars=args.split_input)
        logging.info('[%d/%d] Parsing %d words: %s', jid, len(jobs), len(sentence), ' '.join(sentence.words))
        wcfg.update(extra_rules)

        start = time.time()
        sliced_sampling(wcfg, sentence.fsa,
                        make_nonterminal(args.start),
                        make_nonterminal(args.goal),
                        args.samples, args.burn, args.max,
                        args.a, args.b,
                        args.intersection,
                        args.grammarfmt)
        end = time.time()
        logging.info("Duration %ss", end - start)
def argparser():
    """Build and return the command-line argument parser."""
    p = argparse.ArgumentParser(
        prog='mcmcparse',
        description='MCMC Earley parser',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument('grammar', type=str,
                   help='path to CFG rules (or prefix in case of discodop format)')
    p.add_argument('input', nargs='?', type=argparse.FileType('r'),
                   default=sys.stdin,
                   help='input corpus (one sentence per line)')
    p.add_argument('--split-input', action='store_true',
                   help='assumes the input is given separated by triple bars')
    p.add_argument('--intersection', type=str, default='nederhof',
                   choices=['nederhof', 'earley'],
                   help="intersection algorithm (nederhof: bottom-up; earley: top-down)")
    p.add_argument('--log', action='store_true',
                   help='applies the log transform to the probabilities of the rules')
    p.add_argument('--start', type=str, default='S',
                   help="start symbol of the grammar")
    p.add_argument('--goal', type=str, default='GOAL',
                   help="goal symbol for intersection")
    p.add_argument('--samples', type=int, default=100,
                   help='The number of samples')
    p.add_argument('--burn', type=int, default=0,
                   help='The number of initial samples to discard')
    p.add_argument('--max', type=int, default=1000,
                   help='The maximum number of iterations')
    p.add_argument('-a', type=float, nargs=2, default=[0.1, 0.3],
                   metavar='BEFORE AFTER',
                   help='a, first Beta parameter before and after finding the first derivation')
    p.add_argument('-b', type=float, nargs=2, default=[1.0, 1.0],
                   metavar='BEFORE AFTER',
                   help='b, second Beta parameter before and after finding the first derivation')
    p.add_argument('--unkmodel', type=str, default=None,
                   choices=['passthrough', 'stfdbase', 'stfd4', 'stfd6'],
                   help="unknown word model")
    p.add_argument('--default-symbol', type=str, default='X',
                   help='default nonterminal (use for pass-through rules)')
    p.add_argument('--verbose', '-v', action='store_true',
                   help='increase the verbosity level')
    p.add_argument('--grammarfmt', type=str, default='bar',
                   choices=['bar', 'discodop', 'milos'],
                   help="grammar format ('bar' is the native format)")
    p.add_argument('--profile',
                   help='enables profiling')
    return p
# Script entry point: parse CLI arguments and run the sampler.
if __name__ == '__main__':
    main(argparser().parse_args())
| {
"content_hash": "13a803b941834f6ce58e22330ad24d23",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 137,
"avg_line_length": 36.62352941176471,
"alnum_prop": 0.6201413427561837,
"repo_name": "wilkeraziz/pcfg-sampling",
"id": "43d3d20467ef69e52c9fc12479778e44f647b642",
"size": "12452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcmcparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93582"
}
],
"symlink_target": ""
} |
import datetime
import functools
import json
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import connection, transaction
import olympia.core.logger
from . import models as context
task_log = olympia.core.logger.getLogger('z.task')
def login_required(f=None, redirect=True):
    """
    Like Django's login_required, but with to= instead of next=.

    If redirect=False then we return 401 instead of redirecting to the
    login page.  That's nice for ajax views.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request, *args, **kw):
            # Imported lazily to avoid a circular import with accounts.utils.
            from olympia.accounts.utils import redirect_for_login
            if request.user.is_authenticated:
                return func(request, *args, **kw)
            if redirect:
                return redirect_for_login(request)
            return http.HttpResponse(status=401)
        return wrapper

    # Support both @login_required and @login_required(redirect=False).
    return decorator(f) if f else decorator
def post_required(f):
    """Reject any non-POST request with a 405 (method not allowed)."""
    @functools.wraps(f)
    def wrapper(request, *args, **kw):
        if request.method == 'POST':
            return f(request, *args, **kw)
        return http.HttpResponseNotAllowed(['POST'])
    return wrapper
def permission_required(permission):
    """Decorator factory: 403 unless the user holds *permission*.

    Login is enforced first via login_required.
    """
    def decorator(f):
        @functools.wraps(f)
        @login_required
        def wrapper(request, *args, **kw):
            # Imported here to dodge an import cycle with olympia.access.
            from olympia.access import acl
            if not acl.action_allowed(request, permission):
                raise PermissionDenied
            return f(request, *args, **kw)
        return wrapper
    return decorator
def json_response(response, has_trans=False, status_code=200):
    """
    Return a response as JSON. If you are just wrapping a view,
    then use the json_view decorator.
    """
    # to avoid circular imports with users.models
    from .utils import AMOJSONEncoder

    # AMOJSONEncoder knows how to serialise translated fields; with
    # cls=None json.dumps falls back to the stock encoder.
    encoder = AMOJSONEncoder if has_trans else None
    payload = json.dumps(response, cls=encoder)
    return http.HttpResponse(payload,
                             content_type='application/json',
                             status=status_code)
def json_view(f=None, has_trans=False, status_code=200):
    """Decorator that serialises a view's return value as JSON.

    A view may still return an HttpResponse directly, which is passed
    through untouched.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            result = func(*args, **kw)
            if isinstance(result, http.HttpResponse):
                return result
            return json_response(result, has_trans=has_trans,
                                 status_code=status_code)
        return wrapper

    # Support both @json_view and @json_view(status_code=...).
    return decorator(f) if f else decorator
# Shorthand for a 400 response whose body is the JSON-encoded argument.
json_view.error = lambda s: http.HttpResponseBadRequest(
    json.dumps(s), content_type='application/json')
def use_primary_db(f):
    """Decorator: run *f* inside the use_primary_db context so database
    access during the call is pinned to the primary."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        with context.use_primary_db():
            return f(*args, **kwargs)
    return inner
def set_modified_on(f):
    """
    Will update the modified timestamp on the objects provided through
    the `set_modified_on` keyword argument, a short time after the wrapped
    function exits successfully (returns a truthy value).

    If that function returns a dict, it will also use that dict as additional
    keyword arguments to update on the provided objects.
    """
    from olympia.amo.tasks import set_modified_on_object

    @functools.wraps(f)
    def wrapper(*args, **kw):
        # Popped so the wrapped function never sees the marker kwarg.
        obj_info = kw.pop('set_modified_on', None)
        # obj_info is a tuple in the form of (app_label, model_name, pk)
        result = f(*args, **kw)
        # Only schedule the update when the call succeeded (truthy result).
        if obj_info and result:
            # If the function returned a dict, pass that dict down as
            # kwargs to the set_modified_on_object task. Useful to set
            # things like icon hashes.
            kwargs_from_result = result if isinstance(result, dict) else {}
            task_log.info('Delaying setting modified on object: %s, %s' %
                          (obj_info[0], obj_info[1]))
            # Execute set_modified_on_object in NFS_LAG_DELAY seconds. This
            # allows us to make sure any changes have been written to disk
            # before changing modification date and/or image hashes stored
            # on objects - otherwise we could end up caching an old version
            # of an image on CDNs/clients for a very long time.
            set_modified_on_object.apply_async(
                args=obj_info,
                kwargs=kwargs_from_result,
                eta=(datetime.datetime.now() +
                     datetime.timedelta(seconds=settings.NFS_LAG_DELAY)))
        return result
    return wrapper
def allow_cross_site_request(f):
    """Allow other sites to access this resource, see
    https://developer.mozilla.org/en/HTTP_access_control."""
    @functools.wraps(f)
    def wrapper(request, *args, **kw):
        response = f(request, *args, **kw)
        # If Access-Control-Allow-Credentials isn't set, the browser won't
        # return data required cookies to see.  This is a good thing, let's
        # keep it that way.  (This note used to be a stray triple-quoted
        # string — a no-op expression statement, not a docstring.)
        response['Access-Control-Allow-Origin'] = '*'
        response['Access-Control-Allow-Methods'] = 'GET'
        return response
    return wrapper
def allow_mine(f):
    """Resolve the special user_id value 'mine' to the logged-in user.

    Anonymous users hitting a 'mine' URL are redirected to the login page;
    any other user_id is passed through unchanged.
    """
    @functools.wraps(f)
    def wrapper(request, user_id, *args, **kw):
        # Imported lazily to avoid a circular import with accounts.utils.
        from olympia.accounts.utils import redirect_for_login
        if user_id == 'mine':
            if not request.user.is_authenticated:
                return redirect_for_login(request)
            user_id = request.user.id
        return f(request, user_id, *args, **kw)
    return wrapper
def atomic(fn):
    """Set the transaction isolation level to SERIALIZABLE and then delegate
    to transaction.atomic to run the specified code atomically. The
    SERIALIZABLE level will run SELECTs in LOCK IN SHARE MODE when used in
    conjunction with transaction.atomic.

    Docs: https://dev.mysql.com/doc/refman/5.6/en/set-transaction.html.
    """
    # TODO: Make this the default for all transactions.
    @functools.wraps(fn)
    @use_primary_db
    def inner(*args, **kwargs):
        # The isolation level must be set before the transaction begins,
        # hence the raw cursor call ahead of transaction.atomic().
        cursor = connection.cursor()
        cursor.execute('SET TRANSACTION ISOLATION LEVEL SERIALIZABLE')
        with transaction.atomic():
            return fn(*args, **kwargs)

    # The non_atomic version is essentially just a non-decorated version of the
    # function. This is just here to handle the fact that django's tests are
    # run in a transaction and setting this will make mysql blow up. You can
    # mock your function to the non-atomic version to make it run in a test.
    #
    #     with mock.patch('module.func', module.func.non_atomic):
    #         test_something()
    inner.non_atomic = fn
    return inner
| {
"content_hash": "992948e6ed58ea9557e94b76f54f13c3",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 34.88780487804878,
"alnum_prop": 0.6223434004474273,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "63b140d96581b9d2cc005c38cbbe3f2bf6675522",
"size": "7152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/amo/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .. import *
import six
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.encoding import smart_str
from django.contrib.auth.models import AnonymousUser
from django.utils.translation import ugettext_lazy as _
import datetime
REDIRECT_FIELD_NAME = 'next'
def get_hexdigest(algorithm, salt, raw_password):
    """Hash ``salt + raw_password`` with the named algorithm.

    Only 'md5' and 'sha1' are supported; anything else raises ValueError.
    """
    payload = smart_str(salt) + smart_str(raw_password)
    if algorithm == 'md5':
        return md5_constructor(payload).hexdigest()
    if algorithm == 'sha1':
        return sha_constructor(payload).hexdigest()
    raise ValueError('Got unknown password algorithm type in password')
@six.python_2_unicode_compatible
class User(Document):
    """A User document that aims to mirror most of the API specified by Django
    at http://docs.djangoproject.com/en/dev/topics/auth/#users
    """
    # Unique login handle; uniqueness is enforced by the index in ``meta``.
    username = StringField(max_length=30, required=True,
                           verbose_name=_('username'),
                           help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
    first_name = StringField(max_length=30,
                             verbose_name=_('first name'))
    last_name = StringField(max_length=30,
                            verbose_name=_('last name'))
    email = EmailField(verbose_name=_('e-mail address'))
    # Stored as '[algo]$[salt]$[hexdigest]'; set via set_password(), never
    # assigned directly.
    password = StringField(max_length=128,
                           verbose_name=_('password'),
                           help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = BooleanField(default=False,
                            verbose_name=_('staff status'),
                            help_text=_("Designates whether the user can log into this admin site."))
    is_active = BooleanField(default=True,
                             verbose_name=_('active'),
                             help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = BooleanField(default=False,
                                verbose_name=_('superuser status'),
                                help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    # Callable defaults so each document gets its own "now" at creation time.
    last_login = DateTimeField(default=datetime.datetime.now,
                               verbose_name=_('last login'))
    date_joined = DateTimeField(default=datetime.datetime.now,
                                verbose_name=_('date joined'))
    meta = {
        'indexes': [
            {'fields': ['username'], 'unique': True}
        ]
    }
    def __str__(self):
        # Display a user as their username (six makes this __unicode__ on py2).
        return self.username
    def get_full_name(self):
        """Returns the users first and last names, separated by a space.
        """
        full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
        return full_name.strip()
    def is_anonymous(self):
        # Mirrors django.contrib.auth: a stored user is never anonymous.
        return False
    def is_authenticated(self):
        # Mirrors django.contrib.auth: any User instance counts as logged in.
        return True
    def set_password(self, raw_password):
        """Sets the user's password - always use this rather than directly
        assigning to :attr:`~mongoengine.django.auth.User.password` as the
        password is hashed before storage.
        """
        from random import random
        algo = 'sha1'
        # 5-char salt derived from two random() calls hashed together.
        salt = get_hexdigest(algo, str(random()), str(random()))[:5]
        hash = get_hexdigest(algo, salt, raw_password)
        self.password = '%s$%s$%s' % (algo, salt, hash)
        self.save()
        return self
    def check_password(self, raw_password):
        """Checks the user's password against a provided password - always use
        this rather than directly comparing to
        :attr:`~mongoengine.django.auth.User.password` as the password is
        hashed before storage.
        """
        algo, salt, hash = self.password.split('$')
        return hash == get_hexdigest(algo, salt, raw_password)
    @classmethod
    def create_user(cls, username, password, email=None):
        """Create (and save) a new user with the given username, password and
        email address.
        """
        now = datetime.datetime.now()
        # Normalize the address by lowercasing the domain part of the email
        # address.
        if email is not None:
            try:
                email_name, domain_part = email.strip().split('@', 1)
            except ValueError:
                pass
            else:
                email = '@'.join([email_name, domain_part.lower()])
        user = cls(username=username, email=email, date_joined=now)
        user.set_password(password)
        user.save()
        return user
    def get_and_delete_messages(self):
        # Compatibility stub for Django's user-message API; no-op here.
        return []
class MongoEngineBackend(object):
    """Authenticate using MongoEngine and mongoengine.django.auth.User."""

    supports_object_permissions = False
    supports_anonymous_user = False
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        """Return the matching User on success, otherwise None."""
        candidate = User.objects(username=username).first()
        if not candidate:
            return None
        if password and candidate.check_password(password):
            return candidate
        return None

    def get_user(self, user_id):
        """Fetch a User by primary key (None if not found)."""
        return User.objects.with_id(user_id)
def get_user(userid):
    """Returns a User object from an id (User.id). Django's equivalent takes
    request, but taking an id instead leaves it up to the developer to store
    the id in any way they want (session, signed cookie, etc.).

    Falsy or unknown ids yield an AnonymousUser.
    """
    if userid:
        found = MongoEngineBackend().get_user(userid)
        if found:
            return found
    return AnonymousUser()
| {
"content_hash": "4d4e905f1aa54305875c17f70951cf92",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 146,
"avg_line_length": 37.6953642384106,
"alnum_prop": 0.6055867884750527,
"repo_name": "conversocial/mongoengine",
"id": "9eb45e773553881d2c30e366cf74fb74d7d4d257",
"size": "5692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/django/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "511"
},
{
"name": "Python",
"bytes": "543486"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import gevent
import logging
from django.conf import settings
from django.views.generic.base import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from queue_manager.sqs import sqs_writer
from queue_manager.rabbitmq import rabbitmq_writer
from api.json_responses import json_error, json_ok
logger = logging.getLogger('listener_logger')
class IncidentListenerAPI(View):
    """
    API for accepting incidents in JSON format
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # CSRF is disabled: external monitoring systems POST here directly.
        return super(IncidentListenerAPI, self).dispatch(*args, **kwargs)

    def post(self, request):
        """
        Accepts JSON
        {
            "event":
                {
                    "eventid": <int>,
                    "element": <string>,
                    "message": <string>,
                },
            "timestamp": <int>
        }
        :return JSON response:
        """
        try:
            parsed_json = json.loads(request.body)
        except Exception as e:
            # Bug fix: the original was 'Invalid JSON' % e, which raises
            # TypeError ("not all arguments converted") because the format
            # string has no conversion specifier.
            return json_error('Invalid JSON: %s' % e)
        # Every configured incident parameter must be present.
        for k in settings.INCIDENT_PARAMS:
            if k not in parsed_json:
                return json_error('Missing JSON key:%s' % k)
        if settings.QUEUE_TYPE in ['SQS', 'sqs']:
            queue_writer = sqs_writer.send_to_sqs
        elif settings.QUEUE_TYPE in ['RABBITMQ', 'rabbitmq']:
            queue_writer = rabbitmq_writer.send_to_rabbitmq
        else:
            raise ValueError('Incorrect value "%s" for QUEUE_TYPE in %s' %
                             (settings.QUEUE_TYPE, settings.SETTINGS_MODULE))
        # Hand the raw body to the queue writer on a greenlet; sleep(0)
        # yields so the greenlet gets a chance to start before we respond.
        gevent.spawn(queue_writer, request.body)
        gevent.sleep(0)
return json_ok('accepted') | {
"content_hash": "44b5d20832d89d456cd6c4daccac5439",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 31.67241379310345,
"alnum_prop": 0.5895481763745237,
"repo_name": "CitoEngine/cito_engine",
"id": "b9cb760a8666d079245fefd32a9acf18392188cf",
"size": "1837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api/incident_listener.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "51363"
},
{
"name": "HTML",
"bytes": "126362"
},
{
"name": "JavaScript",
"bytes": "99050"
},
{
"name": "Python",
"bytes": "306077"
},
{
"name": "Shell",
"bytes": "3087"
}
],
"symlink_target": ""
} |
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class SimpleSwitch13(app_manager.RyuApp):
    """A learning L2 switch for OpenFlow 1.3."""

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # Learned forwarding table: {dpid: {mac: port}}.
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss entry when a switch connects."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # Send the full packet to the controller (OFPCML_NO_BUFFER): OVS
        # releases before v2.1.0 would otherwise hand us an invalid
        # buffer_id together with truncated packet data, so we could not
        # output packets correctly.
        table_miss = parser.OFPMatch()
        to_controller = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                                ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, table_miss, to_controller)

    def add_flow(self, datapath, priority, match, actions):
        """Push a flow-mod applying ``actions`` to packets matching ``match``."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        instructions = [parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        flow_mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                     idle_timeout=0, hard_timeout=0,
                                     match=match, instructions=instructions)
        datapath.send_msg(flow_mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (or flood) the packet."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        eth = packet.Packet(msg.data).get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        table = self.mac_to_port.setdefault(dpid, {})
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
        # Remember which port this source MAC lives on so we can avoid
        # flooding when it is a destination later.
        table[src] = in_port
        out_port = table.get(dst, ofproto.OFPP_FLOOD)
        actions = [parser.OFPActionOutput(out_port)]
        # Known destination: install a flow so future packets skip packet-in.
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, 1,
                          parser.OFPMatch(in_port=in_port, eth_dst=dst),
                          actions)
        data = msg.data if msg.buffer_id == ofproto.OFP_NO_BUFFER else None
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
| {
"content_hash": "5a49f252a2cdfe618ba9f809a8da5413",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 37.02298850574713,
"alnum_prop": 0.609438062713443,
"repo_name": "muzixing/ryu",
"id": "899b3845d49d6fdf5dbeead16af00531e1cca74b",
"size": "3834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ryu/app/simple_switch_13.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "7182"
},
{
"name": "Erlang",
"bytes": "872692"
},
{
"name": "HTML",
"bytes": "4612"
},
{
"name": "JavaScript",
"bytes": "46535"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5236694"
},
{
"name": "Shell",
"bytes": "15461"
}
],
"symlink_target": ""
} |
import re
from configparser import ConfigParser
from os.path import isfile
from .client import ArchivesSpaceClient
from .reporter import CSVReporter
from .validator import Validator
class DACSspace:
    """Base DACSspace class. Fetches data from AS, validates and reports results."""

    def __init__(self, as_config, csv_filepath):
        """Validate the results path and load ArchivesSpace credentials.

        Args:
            as_config (str): filepath to ArchivesSpace configuration file.
            csv_filepath (str): filepath at which to save results file.

        Raises:
            ValueError: if the results path is not a usable .csv filename.
            IOError: if the configuration file does not exist.
        """
        if not csv_filepath.endswith(".csv"):
            raise ValueError("File must have .csv extension")
        if re.search(r'[*?:"<>|]', csv_filepath):
            raise ValueError(
                'File name cannot contain the following characters: * ? : " < > | ')
        self.csv_filepath = csv_filepath
        if not isfile(as_config):
            raise IOError(
                "Could not find an ArchivesSpace configuration file at {}".format(as_config))
        parser = ConfigParser()
        parser.read(as_config)
        # (baseurl, user, password, repository) — the argument order
        # expected by ArchivesSpaceClient.
        self.as_config = tuple(
            parser.get('ArchivesSpace', option)
            for option in ('baseurl', 'user', 'password', 'repository'))

    def run(self, published_only, invalid_only,
            schema_identifier, schema_filepath):
        """Fetch resources, validate each one, and write the CSV report."""
        client = ArchivesSpaceClient(*self.as_config)
        validator = Validator(schema_identifier, schema_filepath)
        reporter = CSVReporter(self.csv_filepath)
        resources = client.get_resources(published_only)
        reporter.write_report(
            [validator.validate_data(r) for r in resources], invalid_only)
| {
"content_hash": "fcba48409c4339705b28d3d8183de47d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 93,
"avg_line_length": 39.22222222222222,
"alnum_prop": 0.6328611898016997,
"repo_name": "RockefellerArchiveCenter/DACSspace",
"id": "75a3d81418674b308c2cdc161299bf4d32f4eca7",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/base",
"path": "dacsspace/dacsspace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23982"
}
],
"symlink_target": ""
} |
from django.db import connection
from common import BaseEvolutionOperations
class EvolutionOperations(BaseEvolutionOperations):
    def rename_column(self, opts, old_field, new_field):
        """Return the SQL statements that rename a column in PostgreSQL."""
        if old_field.column == new_field.column:
            # Same name — nothing to do.
            return []
        quote = connection.ops.quote_name
        statement = 'ALTER TABLE %s RENAME COLUMN %s TO %s;' % (
            quote(opts.db_table),
            quote(old_field.column),
            quote(new_field.column))
        return [statement]
| {
"content_hash": "751860036361259990158fd34bf57028",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 34.357142857142854,
"alnum_prop": 0.6528066528066528,
"repo_name": "timbroder/ai-stager",
"id": "f1fade644f062ba851b92b80357469b8c9cdfcb4",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stager/django_evolution/db/postgresql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "176775"
},
{
"name": "Python",
"bytes": "320655"
},
{
"name": "Racket",
"bytes": "1101"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
__author__ = 'lqrz'
import codecs
import logging
# import glob
import sys
# Configure the root logger to append timestamped records (DEBUG and up)
# to a log file next to the script.
logger = logging.getLogger('')
hdlr = logging.FileHandler('decompoundingEvaluation.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
    # Default gold file; has to start with 2 lines of header. #TODO:hardcoded
    goldFile = 'decompounding/test_set_len4.txt'
    # # # resultsFiles = glob.glob('/home/lquiroz/jobs/decompound/100_046/output*/results.txt')
    # resultsFiles = glob.glob('output*/results.txt')
    # resultsFolder = 'decompounding' # without last /
    resultFile = None
    if len(sys.argv) == 3:
        goldFile = sys.argv[1]
        resultFile = sys.argv[2]
    elif len(sys.argv) > 1:
        logger.error('Bad params.')
        exit()
    # resultsFiles = glob.glob(resultsFolder+'/output*/results.txt')
    if resultFile is None:
        # Bug fix: previously `resultFile` was left unbound when the script
        # ran without arguments, raising a NameError below. Fail cleanly.
        logger.error('Bad params.')
        exit()
    # Bug fix: the original used list(resultFile), which splits the filename
    # string into single characters; we want a one-element list of paths.
    resultsFiles = [resultFile]
    logger.debug('Nr of result files: '+str(len(resultsFiles)))
    # Predicted split per compound; '' means the split was discarded.
    resultsCompounds = dict()
    # Evaluation counters.
    accuracy = 0
    coverage = 0
    noInputRepresentation = 0
    noTailRepresentation = 0
    noSplitsAtAll = 0
    discardedSplits = 0
    fgold = codecs.open(goldFile, 'r', encoding='utf-8')
    totalResults = 0
    # First pass: collect the predicted split for every compound.
    for resultsFile in resultsFiles:
        fresults = codecs.open(resultsFile, 'r', encoding='utf-8')
        for l in fresults:
            totalResults += 1
            cleanLine = l.strip('\n').split('\t')
            if cleanLine[0].strip() == cleanLine[1].strip() and cleanLine[2].strip() == '':
                # We found possible splits, but they didnt pass rank and
                # similarity thresholds.
                resultsCompounds[cleanLine[0]] = ''
                discardedSplits += 1
                continue
            # We were able to split.
            resultSplit = ' '.join([cleanLine[1], cleanLine[2]]).strip()
            resultsCompounds[cleanLine[0]] = resultSplit
            coverage += 1
    lineNr = 0
    # Second pass: compare gold splits against the predictions.
    for l in fgold:
        lineNr += 1
        # Gold file must have 2 header lines
        if lineNr < 3:
            continue
        goldSplit = set()
        idx = l.strip('\n').find('|')
        if idx != -1:
            # Multiple gold possibilities separated by '|'. The 2nd modifier
            # appears lowercased in the gold file; it should be uppercase.
            compound = l.strip('\n').split('\t')[0].strip()
            modifier1 = l.strip('\n').split('\t')[1].split('|')[0].strip()
            modifier2 = l.strip('\n').split('\t')[1].split('|')[1].strip().title()
            head = l.strip('\n').split('\t')[2].strip()
            goldSplit.add(' '.join([modifier1, head]))
            goldSplit.add(' '.join([modifier2, head]))
        else:
            compound = l.strip('\n').split('\t')[0].strip()
            goldSplit.add(' '.join([l.strip('\n').split('\t')[1].strip(), l.strip('\n').split('\t')[2].strip()]))
        if resultsCompounds[compound] in goldSplit:
            accuracy += 1
    assert lineNr-2 == totalResults, 'Total nr of lines in gold file does not match total nr lines in results file '+\
        str(lineNr-2)+' '+str(totalResults)
    assert noSplitsAtAll == (noTailRepresentation+noInputRepresentation), 'Error in nr of no splits.'
    # Stats
    logger.info('Total number of examples: '+str(lineNr-2))
    logger.info('Examples for which no splits were found: '+str(noSplitsAtAll)+' '+\
        str(noSplitsAtAll/float(lineNr-2)))
    logger.info('No input representation found: '+str(noInputRepresentation)+' '+\
        str(noInputRepresentation/float(lineNr-2)))
    logger.info('No tail representation found: '+str(noTailRepresentation)+' '+\
        str(noTailRepresentation/float(lineNr-2)))
    logger.info('Examples for which weak prefixes were found: '+str(discardedSplits)+' '+\
        str(discardedSplits/float(lineNr-2-noSplitsAtAll)))
    logger.info('Nr of compounds that were split: '+str(coverage)+' '+str(coverage/float(lineNr-2))) # Measured against all word in gold file.
    logger.info('Accuracy: '+str(accuracy)+' '+str(accuracy/float(coverage))) # Measured against split compounds.
logger.info('End') | {
"content_hash": "67e799416e30841a0142d229791303b2",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 142,
"avg_line_length": 37.910714285714285,
"alnum_prop": 0.5984455958549223,
"repo_name": "jodaiber/semantic_compound_splitting",
"id": "8fcadf363be276263a58e0a5a1f0a8dd58e74780",
"size": "4246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualization_and_test/evaluateMosesDecompounding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "40101"
},
{
"name": "Python",
"bytes": "99790"
},
{
"name": "Shell",
"bytes": "5397"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from yawast.scanner.plugins.dns import basic
class TestGetText(TestCase):
    def test_get_text(self):
        """The domain's TXT records should include its known SPF record."""
        records = basic.get_text("adamcaudill.com")
        self.assertTrue(len(records) > 0)
        # Only the SPF record (the one starting with "v=") has a pinned value.
        for record in [r for r in records if r.startswith("v=")]:
            self.assertEqual(
                "v=spf1 mx a ptr include:_spf.google.com ~all", record)
| {
"content_hash": "5d7ff08f8a0cb20a82e5d34c6cdbc362",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6336898395721925,
"repo_name": "adamcaudill/yawast",
"id": "8dc586197cd59b1371e37d0c967543e876f1c55d",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_get_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "Python",
"bytes": "429901"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
} |
"""Sentry logging module."""
from __future__ import absolute_import, print_function
import logging
import warnings
import pkg_resources
import six
from flask import g
from werkzeug.utils import import_string
from . import config
from .ext import InvenioLoggingBase
# Use the real Sentry processor base class when the raven package is
# installed; otherwise fall back to a no-op stand-in so RequestIdProcessor
# below can still be defined.
try:
    pkg_resources.get_distribution('raven')
    from raven.processors import Processor
except pkg_resources.DistributionNotFound:
    class Processor(object):
        """Dummy class in case Sentry is not installed."""
        def __init__(self, *args, **kwargs):
            """Do nothing."""
            pass
class InvenioLoggingSentry(InvenioLoggingBase):
    """Invenio-Logging extension for Sentry.

    Wires the Flask application's logging to Sentry via either the modern
    ``sentry_sdk`` or the legacy ``raven`` client, depending on the
    ``SENTRY_SDK`` configuration flag.
    """
    def init_app(self, app):
        """Flask application initialization."""
        self.init_config(app)
        # Only configure Sentry if SENTRY_DSN is set.
        if app.config['SENTRY_DSN'] is None:
            return
        self.install_handler(app)
        app.extensions['invenio-logging-sentry'] = self
    def init_config(self, app):
        """Initialize configuration.

        Copies every LOGGING_SENTRY*/SENTRY_* default from the config module
        unless the application already defines the key.
        """
        for k in dir(config):
            if k.startswith('LOGGING_SENTRY') or k.startswith('SENTRY_'):
                app.config.setdefault(k, getattr(config, k))
    def install_handler(self, app):
        """Install log handler."""
        # LOGGING_SENTRY_LEVEL is a level *name* (e.g. 'WARNING').
        level = getattr(logging, app.config['LOGGING_SENTRY_LEVEL'])
        # Loggers excluded from Sentry reporting when python-warning capture
        # is disabled.
        logging_exclusions = None
        if not app.config['LOGGING_SENTRY_PYWARNINGS']:
            logging_exclusions = (
                'raven',
                'gunicorn',
                'south',
                'sentry.errors',
                'django.request',
                'dill',
                'py.warnings')
        # Choose between the modern sentry-sdk and the legacy raven client.
        if app.config['SENTRY_SDK']:
            self.install_sentry_sdk_handler(app, logging_exclusions, level)
        else:
            self.install_raven_handler(app, logging_exclusions, level)
        # Werkzeug only adds a stream handler if there's no other handlers
        # defined, so when Sentry adds a log handler no output is
        # received from Werkzeug unless we install a console handler
        # here on the werkzeug logger.
        if app.debug:
            logger = logging.getLogger('werkzeug')
            logger.setLevel(logging.INFO)
            logger.addHandler(logging.StreamHandler())
    def install_sentry_sdk_handler(self, app, logging_exclusions, level):
        """Install sentry-python sdk log handler."""
        # Imported lazily: sentry_sdk is an optional dependency.
        import sentry_sdk
        from sentry_sdk import configure_scope
        from sentry_sdk.integrations.celery import CeleryIntegration
        from sentry_sdk.integrations.flask import FlaskIntegration
        from sentry_sdk.integrations.redis import RedisIntegration
        from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
        # Flask integration is always on; the rest are config-gated.
        integrations = [FlaskIntegration()]
        if app.config['LOGGING_SENTRY_CELERY']:
            integrations.append(CeleryIntegration())
        if app.config['LOGGING_SENTRY_SQLALCHEMY']:
            integrations.append(SqlalchemyIntegration())
        if app.config['LOGGING_SENTRY_REDIS']:
            integrations.append(RedisIntegration())
        sentry_sdk.init(
            dsn=app.config['SENTRY_DSN'],
            in_app_exclude=logging_exclusions,
            integrations=integrations,
            # Tag every outgoing event with the current request id.
            before_send=self.add_request_id_sentry_python,
        )
        with configure_scope() as scope:
            scope.level = level
    def install_raven_handler(self, app, logging_exclusions, level):
        """Install raven log handler."""
        # NOTE(review): "depricated" typo kept as-is — runtime message text.
        warnings.warn('The Raven library will be depricated.',
                      PendingDeprecationWarning)
        from raven.contrib.celery import register_logger_signal, \
            register_signal
        from raven.contrib.flask import Sentry
        from raven.handlers.logging import SentryHandler
        # LOGGING_SENTRY_CLASS may be a class or a dotted import path.
        cls = app.config['LOGGING_SENTRY_CLASS']
        if cls:
            if isinstance(cls, six.string_types):
                cls = import_string(cls)
        else:
            cls = Sentry
        sentry = cls(
            app,
            logging=True,
            level=level,
            logging_exclusions=logging_exclusions,
        )
        app.logger.addHandler(SentryHandler(client=sentry.client, level=level))
        # Capture warnings from warnings module
        if app.config['LOGGING_SENTRY_PYWARNINGS']:
            self.capture_pywarnings(
                SentryHandler(sentry.client))
        # Setup Celery logging to Sentry
        if app.config['LOGGING_SENTRY_CELERY']:
            try:
                register_logger_signal(sentry.client, loglevel=level)
            except TypeError:
                # Compatibility mode for Raven<=5.1.0
                register_logger_signal(sentry.client)
            register_signal(sentry.client)
    def add_request_id_sentry_python(self, event, hint):
        """Add the request id as a tag."""
        # Only possible inside a request context that set g.request_id.
        if g and hasattr(g, 'request_id'):
            tags = event.get('tags') or []
            tags.append(['request_id', g.request_id])
            event['tags'] = tags
        return event
class RequestIdProcessor(Processor):
    """Sentry event request processor for adding the request id as a tag."""

    def process(self, data, **kwargs):
        """Attach ``g.request_id`` (when present) to the event's tags."""
        processed = super(RequestIdProcessor, self).process(data, **kwargs)
        if g and hasattr(g, 'request_id'):
            event_tags = processed.get('tags', {})
            event_tags['request_id'] = g.request_id
            processed['tags'] = event_tags
        return processed
| {
"content_hash": "b218d4075775a66af5bf9f77386de9ca",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 35.348101265822784,
"alnum_prop": 0.6098478066248881,
"repo_name": "inveniosoftware/invenio-logging",
"id": "f841449df72cab68ad7efe08db3f557533d9563b",
"size": "5820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_logging/sentry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39038"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class _ModelWithOptimizer(training.Model):
  """Single dense layer whose ``call`` performs one Adam training step."""

  def __init__(self):
    super(_ModelWithOptimizer, self).__init__()
    self.dense = core.Dense(1)
    self.optimizer = adam.AdamOptimizer(0.01)

  @def_function.function(
      input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
                       tensor_spec.TensorSpec([None], dtypes.float32)))
  def call(self, x, y):
    """Apply one gradient update and return the mean-squared-error loss."""
    with backprop.GradientTape() as tape:
      mse = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
    params = self.trainable_variables
    grads = tape.gradient(mse, params)
    self.optimizer.apply_gradients(zip(grads, params))
    return {'loss': mse}
class _HasDecoratedMethod(object):
  """Minimal holder for a ``def_function.function``-decorated method."""

  @def_function.function
  def f(self, x):
    return 3. * x
class DefFunctionTest(test.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertEqual(len(state), 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaises(ValueError):
fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
model(x, y)
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
input_signature, = compute._cached_input_signatures
self.assertEqual(
tuple(input_signature),
(tensor_spec.TensorSpec(None, dtypes.float32),))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
self.assertEqual(
set(f._cached_input_signatures),
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32),
tensor_spec.TensorSpec([1], dtypes.float32)),
(tensor_spec.TensorSpec([1, 3], dtypes.int32),
tensor_spec.TensorSpec([1], dtypes.int32)))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
    """A decorated bound method dies along with its owning instance."""
    instance = _HasDecoratedMethod()
    instance.f(constant_op.constant(5.))
    weak_ref = weakref.ref(instance.f)
    del instance
    # If the bound method participated in a reference cycle the weakref
    # would still be live here; a dead weakref proves prompt collection.
    self.assertIs(None, weak_ref())
if __name__ == '__main__':
    # Run the suite with eager execution enabled for every test.
    ops.enable_eager_execution()
    test.main()
| {
"content_hash": "209db2801b2fc9ba87700e42ab79e96c",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 78,
"avg_line_length": 29.867857142857144,
"alnum_prop": 0.6663876599306469,
"repo_name": "Bismarrck/tensorflow",
"id": "77cc8ee981a176f9f57028832039fa9bfe1f47a1",
"size": "9053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/def_function_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "493885"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "53117668"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1303624"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "893928"
},
{
"name": "Jupyter Notebook",
"bytes": "2657814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102511"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43480067"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497472"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 12); | {
"content_hash": "21b72392005e34a4dfda1536304aae9c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 163,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.7034220532319392,
"repo_name": "antoinecarme/pyaf",
"id": "08198878228c7b7894c96833d3bd5dc70ad2f8a6",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_7/ar_12/test_artificial_128_Fisher_Lag1Trend_7_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from voluptuous import Schema, Required, All, Length, Range, Url
modelresource_form_schema = Schema({
Required('url'): All(unicode,Url()),
Required('filename'):unicode,
Required('resource_type'):unicode
}) | {
"content_hash": "27d79a06d71c0533c2f25719d083ff04",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.7264150943396226,
"repo_name": "itsrifat/vwadaptor",
"id": "a14cc498e8904dc5beb53835df1a60b5100f9ac1",
"size": "212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vwadaptor/validators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "607"
},
{
"name": "Python",
"bytes": "41836"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
} |
'''Type enumerations'''
from enum import unique
from neurom.utils import OrderedEnum
@unique
class NeuriteIter(OrderedEnum):
    '''Neurite iteration orders'''
    FileOrder = 1  # Order in which neurites appear in the file

    # NRN simulator order: soma -> axon -> basal -> apical
    # Coming from:
    # https://github.com/neuronsimulator/nrn/blob/2dbf2ebf95f1f8e5a9f0565272c18b1c87b2e54c/share/lib/hoc/import3d/import3d_gui.hoc#L874
    NRN = 2
@unique
class NeuriteType(OrderedEnum):
    '''Enum representing valid tree types'''
    undefined = 1
    soma = 2
    axon = 3
    basal_dendrite = 4
    apical_dendrite = 5
    # Wildcard value: tree_type_checker treats it as matching every type.
    all = 32
# Tuple of the neurite types defined above (soma and undefined excluded).
NEURITES = (NeuriteType.all,
            NeuriteType.axon,
            NeuriteType.basal_dendrite,
            NeuriteType.apical_dendrite)

# NOTE(review): presumably the parent ID assigned to root sections — not used
# within this module; confirm against callers.
ROOT_ID = -1
def tree_type_checker(*ref):
    '''Build a predicate matching trees against the given neurite types.

    Returns:
        Functor that takes a tree and returns True if the tree's type is one
        of the NeuriteTypes in ref, or unconditionally True when
        NeuriteType.all is among them.

    Ex:
        >>> from neurom.core.types import NeuriteType, tree_type_checker
        >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
        >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
    '''
    accepted = tuple(ref)

    # The wildcard short-circuits: everything matches.
    if NeuriteType.all in accepted:
        def check_tree_type(_):
            '''Always returns true'''
            return True
        return check_tree_type

    def check_tree_type(tree):
        '''True when the tree's type is one of the accepted reference types'''
        return tree.type in accepted

    return check_tree_type
def dendrite_filter(n):
    '''Select only dendrites (basal or apical)'''
    return n.type in (NeuriteType.basal_dendrite, NeuriteType.apical_dendrite)
def axon_filter(n):
    '''Select only axons'''
    is_axon = n.type == NeuriteType.axon
    return is_axon
| {
"content_hash": "98d47b2be7b3485d7e9e3cda409fbad2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 135,
"avg_line_length": 25.756756756756758,
"alnum_prop": 0.6385099685204617,
"repo_name": "mgeplf/NeuroM",
"id": "40e1ff86ec2d3c720c06ff3afd9477627744bcbc",
"size": "3600",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neurom/core/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "697203"
},
{
"name": "Jupyter Notebook",
"bytes": "2138829"
},
{
"name": "Python",
"bytes": "572707"
}
],
"symlink_target": ""
} |
"""
Some help came from:
https://stackoverflow.com/questions/21964061/qt-qlineedit-error-popup-balloon-message
"""
import sys
import Qt.QtCore as QtCore
import Qt.QtWidgets as QtWidgets
class SuggestionListWidget(QtWidgets.QWidget):
    """Borderless always-on-top popup holding a list widget and a button.

    Shown without activating, so keyboard focus stays on the widget it
    annotates.
    """

    def __init__(self, parent):
        super(SuggestionListWidget, self).__init__(parent)

        # Tool + frameless + stay-on-top makes this behave like a floating
        # popup; WA_ShowWithoutActivating keeps focus on the source widget.
        flags = (QtCore.Qt.Tool
                 | QtCore.Qt.FramelessWindowHint
                 | QtCore.Qt.WindowStaysOnTopHint)
        self.setWindowFlags(flags)
        self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating)
        self.setContentsMargins(0, 0, 0, 0)

        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)

        self.listWidget = QtWidgets.QListWidget()
        self.button = QtWidgets.QPushButton("Button")
        self.layout.addWidget(self.listWidget)
        self.layout.addWidget(self.button)
class WidgetErrorChecking(SuggestionListWidget):
    """Suggestion popup bound to one widget (only QLineEdit is wired up)."""

    def __init__(self, parent, widget):
        super(WidgetErrorChecking, self).__init__(parent)
        self.widget = widget
        self.hide()

        # Only line edits emit textEdited; other widget types get no trigger.
        if isinstance(widget, QtWidgets.QLineEdit):
            widget.textEdited.connect(self.showSuggestions)

    def showSuggestions(self):
        """
        Show the widget.

        Resizes the popup and places it just below the tracked widget,
        in the coordinate space of the shared parent.
        """
        message = "suggestions..."  # placeholder; not currently displayed
        self.adjustSize()
        self.update()
        self.show()
        anchor = self.widget.rect().bottomLeft()
        self.move(self.widget.mapTo(self.parentWidget(), anchor))
class LineEditSuggestion(QtWidgets.QLineEdit):
    """Line edit that shows a floating suggestion popup while editing.

    The popup appears on mouse focus-in and while text is edited, and is
    hidden on mouse focus-out or when Return is pressed.
    """

    def __init__(self, parent):
        super(LineEditSuggestion, self).__init__(parent)
        # Popup is parented to our parent so it floats alongside this widget.
        self.listWidget = SuggestionListWidget(parent)
        self.listWidget.hide()

        self.textEdited.connect(self.showSuggestions)
        self.returnPressed.connect(self.hideSuggestions)

    def focusInEvent(self, event):
        print('focusInEvent:', event)
        if event.reason() == QtCore.Qt.MouseFocusReason:
            self.showSuggestions()
        super(LineEditSuggestion, self).focusInEvent(event)

    def focusOutEvent(self, event):
        print('focusOutEvent:', event)
        if event.reason() == QtCore.Qt.MouseFocusReason:
            self.hideSuggestions()
        # BUG FIX: this previously forwarded to super().focusInEvent(), so
        # QLineEdit never saw the focus-out and its internal focus state
        # (cursor, selection, deselect-on-leave) was left inconsistent.
        super(LineEditSuggestion, self).focusOutEvent(event)

    def showSuggestions(self):
        """Position the popup at this widget's bottom-left corner and show it."""
        print('showSuggestions')
        self.listWidget.adjustSize()
        # mapFromGlobal of the negated corner, negated again, yields the
        # popup position relative to the shared parent.
        p = self.mapFromGlobal(-self.rect().bottomLeft())
        self.listWidget.move(-p)
        self.listWidget.update()
        self.listWidget.show()

    def hideSuggestions(self):
        """Hide the popup if it exists."""
        print('hideSuggestions')
        if self.listWidget is not None:
            self.listWidget.hide()
class Window(QtWidgets.QWidget):
    """Demo window: a plain button beside a suggestion-enabled line edit."""

    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self.layout = QtWidgets.QHBoxLayout(self)

        self.button = QtWidgets.QPushButton(self)
        self.layout.addWidget(self.button)

        self.lineEdit = LineEditSuggestion(self)
        self.layout.addWidget(self.lineEdit)
def main(argv):
    """Create the Qt application, show the demo window, and run the loop."""
    app = QtWidgets.QApplication(argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # Script entry point: launch the suggestion-popup demo.
    main(sys.argv)
| {
"content_hash": "5e7b557d8454e77d21b6b62ad1389f49",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 108,
"avg_line_length": 29.56875,
"alnum_prop": 0.6114986260832805,
"repo_name": "david-cattermole/qt-learning",
"id": "b49c0116cc115578fb21830af4ab414081007af2",
"size": "4731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qtLearn/windows/examples/exampleSuggestionsPopup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "327498"
},
{
"name": "Shell",
"bytes": "197"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.