code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual adversarial text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
# Dependency imports
import tensorflow as tf
import adversarial_losses as adv_lib
import inputs as inputs_lib
import layers as layers_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags governing adversarial training are defined in adversarial_losses.py.
# Classifier
flags.DEFINE_integer('num_classes', 2, 'Number of classes for classification')
# Data path
flags.DEFINE_string('data_dir', '/tmp/IMDB',
                    'Directory path to preprocessed text dataset.')
flags.DEFINE_string('vocab_freq_path', None,
                    'Path to pre-calculated vocab frequency data. If '
                    'None, use FLAGS.data_dir/vocab_freq.txt.')
flags.DEFINE_integer('batch_size', 64, 'Size of the batch.')
flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT')
# Model architecture
flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.')
flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single '
                  'label, for optimization.')
flags.DEFINE_integer('rnn_num_layers', 1, 'Number of LSTM layers.')
flags.DEFINE_integer('rnn_cell_size', 512,
                     'Number of hidden units in the LSTM.')
flags.DEFINE_integer('cl_num_layers', 1,
                     'Number of hidden layers of classification model.')
flags.DEFINE_integer('cl_hidden_size', 30,
                     'Number of hidden units in classification layer.')
flags.DEFINE_integer('num_candidate_samples', -1,
                     'Num samples used in the sampled output layer.')
flags.DEFINE_bool('use_seq2seq_autoencoder', False,
                  'If True, seq2seq auto-encoder is used to pretrain. '
                  'If False, standard language model is used.')
# Vocabulary and embeddings
flags.DEFINE_integer('embedding_dims', 256, 'Dimensions of embedded vector.')
flags.DEFINE_integer('vocab_size', 86934,
                     'The size of the vocaburary. This value '
                     'should be exactly same as the number of the '
                     'vocabulary used in dataset. Because the last '
                     'indexed vocabulary of the dataset preprocessed by '
                     'my preprocessed code, is always <eos> and here we '
                     'specify the <eos> with the the index.')
flags.DEFINE_bool('normalize_embeddings', True,
                  'Normalize word embeddings by vocab frequency')
# Optimization
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate while fine-tuning.')
flags.DEFINE_float('learning_rate_decay_factor', 1.0,
                   'Learning rate decay factor')
flags.DEFINE_boolean('sync_replicas', False, 'sync_replica or not')
flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of replicas to aggregate')
# Regularization
flags.DEFINE_float('max_grad_norm', 1.0,
                   'Clip the global gradient norm to this value.')
flags.DEFINE_float('keep_prob_emb', 1.0, 'keep probability on embedding layer. '
                   '0.5 is optimal on IMDB with virtual adversarial training.')
flags.DEFINE_float('keep_prob_lstm_out', 1.0,
                   'keep probability on lstm output.')
flags.DEFINE_float('keep_prob_cl_hidden', 1.0,
                   'keep probability on classification hidden layer')
def get_model():
  """Returns a new model instance selected by the `bidir_lstm` flag."""
  model_cls = VatxtBidirModel if FLAGS.bidir_lstm else VatxtModel
  return model_cls()
class VatxtModel(object):
  """Constructs training and evaluation graphs.
  Main methods: `classifier_training()`, `language_model_training()`,
  and `eval_graph()`.
  Variable reuse is a critical part of the model, both for sharing variables
  between the language model and the classifier, and for reusing variables for
  the adversarial loss calculation. To ensure correct variable reuse, all
  variables are created in Keras-style layers, wherein stateful layers (i.e.
  layers with variables) are represented as callable instances of the Layer
  class. Each time the Layer instance is called, it is using the same variables.
  All Layers are constructed in the __init__ method and reused in the various
  graph-building functions.
  """
  def __init__(self, cl_logits_input_dim=None):
    # cl_logits_input_dim: width of the classifier-logits input. Defaults to
    # the LSTM output width; subclasses override it (the bidirectional model
    # doubles it for concatenated forward/reverse outputs).
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()
    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None
    # Cache intermediate Tensors that are reused
    self.tensors = {}
    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')
    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)
  @property
  def pretrained_variables(self):
    # Variables trained during LM pretraining and restored before classifier
    # fine-tuning: the shared embedding and the (forward) LSTM.
    return (self.layers['embedding'].trainable_weights +
            self.layers['lstm'].trainable_weights)
  def classifier_training(self):
    """Builds the classifier graph; returns (train_op, loss, global_step)."""
    loss = self.classifier_graph()
    train_op = optimize(loss, self.global_step)
    return train_op, loss, self.global_step
  def language_model_training(self):
    """Builds the LM pretraining graph; returns (train_op, loss, global_step)."""
    loss = self.language_model_graph()
    train_op = optimize(loss, self.global_step)
    return train_op, loss, self.global_step
  def classifier_graph(self):
    """Constructs classifier graph from inputs to classifier loss.
    * Caches the VatxtInput object in `self.cl_inputs`
    * Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`
    Returns:
      loss: scalar float.
    """
    inputs = _inputs('train', pretrain=False)
    self.cl_inputs = inputs
    embedded = self.layers['embedding'](inputs.tokens)
    self.tensors['cl_embedded'] = embedded
    _, next_state, logits, loss = self.cl_loss_from_embedding(
        embedded, return_intermediates=True)
    tf.summary.scalar('classification_loss', loss)
    self.tensors['cl_logits'] = logits
    self.tensors['cl_loss'] = loss
    if FLAGS.single_label:
      # One label per sequence: evaluate accuracy only at the final timestep
      # (index inputs.length - 1) of each example in the batch.
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    acc = layers_lib.accuracy(logits, labels, weights)
    tf.summary.scalar('accuracy', acc)
    adv_loss = (self.adversarial_loss() * tf.constant(
        FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
    tf.summary.scalar('adversarial_loss', adv_loss)
    total_loss = loss + adv_loss
    # Tie saving the LSTM state to the loss so state-saving BPTT advances
    # whenever the loss is evaluated.
    with tf.control_dependencies([inputs.save_state(next_state)]):
      total_loss = tf.identity(total_loss)
    tf.summary.scalar('total_classification_loss', total_loss)
    return total_loss
  def language_model_graph(self, compute_loss=True):
    """Constructs LM graph from inputs to LM loss.
    * Caches the VatxtInput object in `self.lm_inputs`
    * Caches tensors: `lm_embedded`
    Args:
      compute_loss: bool, whether to compute and return the loss or stop after
        the LSTM computation.
    Returns:
      loss: scalar float.
    """
    inputs = _inputs('train', pretrain=True)
    self.lm_inputs = inputs
    return self._lm_loss(inputs, compute_loss=compute_loss)
  def _lm_loss(self,
               inputs,
               emb_key='lm_embedded',
               lstm_layer='lstm',
               lm_loss_layer='lm_loss',
               loss_name='lm_loss',
               compute_loss=True):
    """Runs embedding -> LSTM -> (optional) softmax LM loss for `inputs`.
    Args:
      inputs: VatxtInput.
      emb_key: str, key under which the embedded tokens are cached in
        `self.tensors`.
      lstm_layer: str, key in `self.layers` of the LSTM to run.
      lm_loss_layer: str, key in `self.layers` of the SoftmaxLoss layer.
      loss_name: str, name of the loss summary.
      compute_loss: bool, whether to compute and return the loss or stop
        after the LSTM computation.
    Returns:
      loss: scalar float if `compute_loss` is True, otherwise None.
    """
    embedded = self.layers['embedding'](inputs.tokens)
    self.tensors[emb_key] = embedded
    lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state,
                                                   inputs.length)
    if compute_loss:
      loss = self.layers[lm_loss_layer](
          [lstm_out, inputs.labels, inputs.weights])
      # Save LSTM state whenever the loss is evaluated (state-saving BPTT).
      with tf.control_dependencies([inputs.save_state(next_state)]):
        loss = tf.identity(loss)
        tf.summary.scalar(loss_name, loss)
        return loss
  def eval_graph(self, dataset='test'):
    """Constructs classifier evaluation graph.
    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
    inputs = _inputs(dataset, pretrain=False)
    embedded = self.layers['embedding'](inputs.tokens)
    _, next_state, logits, _ = self.cl_loss_from_embedding(
        embedded, inputs=inputs, return_intermediates=True)
    if FLAGS.single_label:
      # Same final-timestep gather as in classifier_graph.
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    eval_ops = {
        'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), labels, weights)
    }
    # Save LSTM state on every accuracy update so evaluation advances the
    # state-saving BPTT state.
    with tf.control_dependencies([inputs.save_state(next_state)]):
      acc, acc_update = eval_ops['accuracy']
      acc_update = tf.identity(acc_update)
      eval_ops['accuracy'] = (acc, acc_update)
    var_restore_dict = make_restore_average_vars_dict()
    return eval_ops, var_restore_dict
  def cl_loss_from_embedding(self,
                             embedded,
                             inputs=None,
                             return_intermediates=False):
    """Compute classification loss from embedding.
    Args:
      embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
      inputs: VatxtInput, defaults to self.cl_inputs.
      return_intermediates: bool, whether to return intermediate tensors or only
        the final loss.
    Returns:
      If return_intermediates is True:
        lstm_out, next_state, logits, loss
      Else:
        loss
    """
    if inputs is None:
      inputs = self.cl_inputs
    lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
                                               inputs.length)
    if FLAGS.single_label:
      # Classify from the LSTM output at the final timestep only.
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    logits = self.layers['cl_logits'](lstm_out)
    loss = layers_lib.classification_loss(logits, labels, weights)
    if return_intermediates:
      return lstm_out, next_state, logits, loss
    else:
      return loss
  def adversarial_loss(self):
    """Compute adversarial loss based on FLAGS.adv_training_method."""
    def random_perturbation_loss():
      # Perturbs the cached classifier embedding with random noise.
      return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'],
                                              self.cl_inputs.length,
                                              self.cl_loss_from_embedding)
    def adversarial_loss():
      # Perturbs the embedding along the gradient of the classifier loss.
      return adv_lib.adversarial_loss(self.tensors['cl_embedded'],
                                      self.tensors['cl_loss'],
                                      self.cl_loss_from_embedding)
    def virtual_adversarial_loss():
      """Computes virtual adversarial loss.
      Uses lm_inputs and constructs the language model graph if it hasn't yet
      been constructed.
      Also ensures that the LM input states are saved for LSTM state-saving
      BPTT.
      Returns:
        loss: float scalar.
      """
      if self.lm_inputs is None:
        self.language_model_graph(compute_loss=False)
      def logits_from_embedding(embedded, return_next_state=False):
        _, next_state, logits, _ = self.cl_loss_from_embedding(
            embedded, inputs=self.lm_inputs, return_intermediates=True)
        if return_next_state:
          return next_state, logits
        else:
          return logits
      next_state, lm_cl_logits = logits_from_embedding(
          self.tensors['lm_embedded'], return_next_state=True)
      va_loss = adv_lib.virtual_adversarial_loss(
          lm_cl_logits, self.tensors['lm_embedded'], self.lm_inputs,
          logits_from_embedding)
      with tf.control_dependencies([self.lm_inputs.save_state(next_state)]):
        va_loss = tf.identity(va_loss)
      return va_loss
    def combo_loss():
      return adversarial_loss() + virtual_adversarial_loss()
    # Dispatch table keyed by FLAGS.adv_training_method; empty/None disables
    # adversarial regularization entirely.
    adv_training_methods = {
        # Random perturbation
        'rp': random_perturbation_loss,
        # Adversarial training
        'at': adversarial_loss,
        # Virtual adversarial training
        'vat': virtual_adversarial_loss,
        # Both at and vat
        'atvat': combo_loss,
        '': lambda: tf.constant(0.),
        None: lambda: tf.constant(0.),
    }
    with tf.name_scope('adversarial_loss'):
      return adv_training_methods[FLAGS.adv_training_method]()
class VatxtBidirModel(VatxtModel):
  """Extension of VatxtModel that supports bidirectional input."""
  def __init__(self):
    # Classifier input width is doubled: forward and reverse LSTM outputs
    # are concatenated before the logits subgraph.
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)
    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse')
  @property
  def pretrained_variables(self):
    # Adds the reverse LSTM weights to the embedding + forward LSTM weights
    # restored from LM pretraining.
    variables = super(VatxtBidirModel, self).pretrained_variables
    variables.extend(self.layers['lstm_reverse'].trainable_weights)
    return variables
  def classifier_graph(self):
    """Constructs classifier graph from inputs to classifier loss.
    * Caches the VatxtInput objects in `self.cl_inputs`
    * Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`,
      `cl_loss`
    Returns:
      loss: scalar float.
    """
    inputs = _inputs('train', pretrain=False, bidir=True)
    self.cl_inputs = inputs
    f_inputs, _ = inputs
    # Embed both forward and reverse with a shared embedding
    embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
    self.tensors['cl_embedded'] = embedded
    _, next_states, logits, loss = self.cl_loss_from_embedding(
        embedded, return_intermediates=True)
    tf.summary.scalar('classification_loss', loss)
    self.tensors['cl_logits'] = logits
    self.tensors['cl_loss'] = loss
    # Labels/weights come from the forward input stream.
    acc = layers_lib.accuracy(logits, f_inputs.labels, f_inputs.weights)
    tf.summary.scalar('accuracy', acc)
    adv_loss = (self.adversarial_loss() * tf.constant(
        FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
    tf.summary.scalar('adversarial_loss', adv_loss)
    total_loss = loss + adv_loss
    # Save both forward and reverse LSTM states whenever the loss is
    # evaluated (state-saving BPTT).
    saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
    with tf.control_dependencies(saves):
      total_loss = tf.identity(total_loss)
    tf.summary.scalar('total_classification_loss', total_loss)
    return total_loss
  def language_model_graph(self, compute_loss=True):
    """Constructs forward and reverse LM graphs from inputs to LM losses.
    * Caches the VatxtInput objects in `self.lm_inputs`
    * Caches tensors: `lm_embedded`, `lm_embedded_reverse`
    Args:
      compute_loss: bool, whether to compute and return the loss or stop after
        the LSTM computation.
    Returns:
      loss: scalar float, sum of forward and reverse losses.
    """
    inputs = _inputs('train', pretrain=True, bidir=True)
    self.lm_inputs = inputs
    f_inputs, r_inputs = inputs
    f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss)
    r_loss = self._lm_loss(
        r_inputs,
        emb_key='lm_embedded_reverse',
        lstm_layer='lstm_reverse',
        lm_loss_layer='lm_loss_reverse',
        loss_name='lm_loss_reverse',
        compute_loss=compute_loss)
    if compute_loss:
      return f_loss + r_loss
  def eval_graph(self, dataset='test'):
    """Constructs classifier evaluation graph.
    Args:
      dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
    Returns:
      eval_ops: dict<metric name, tuple(value, update_op)>
      var_restore_dict: dict mapping variable restoration names to variables.
        Trainable variables will be mapped to their moving average names.
    """
    inputs = _inputs(dataset, pretrain=False, bidir=True)
    embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
    _, next_states, logits, _ = self.cl_loss_from_embedding(
        embedded, inputs=inputs, return_intermediates=True)
    f_inputs, _ = inputs
    eval_ops = {
        'accuracy':
            tf.contrib.metrics.streaming_accuracy(
                layers_lib.predictions(logits), f_inputs.labels,
                f_inputs.weights)
    }
    # Save states on accuracy update
    saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
    with tf.control_dependencies(saves):
      acc, acc_update = eval_ops['accuracy']
      acc_update = tf.identity(acc_update)
      eval_ops['accuracy'] = (acc, acc_update)
    var_restore_dict = make_restore_average_vars_dict()
    return eval_ops, var_restore_dict
  def cl_loss_from_embedding(self,
                             embedded,
                             inputs=None,
                             return_intermediates=False):
    """Compute classification loss from embedding.
    Args:
      embedded: Length 2 tuple of 3-D float Tensor
        [batch_size, num_timesteps, embedding_dim].
      inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
      return_intermediates: bool, whether to return intermediate tensors or only
        the final loss.
    Returns:
      If return_intermediates is True:
        lstm_out, next_states, logits, loss
      Else:
        loss
    """
    if inputs is None:
      inputs = self.cl_inputs
    # Run the forward and reverse LSTMs over their respective embeddings.
    out = []
    for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
                                      inputs):
      out.append(self.layers[layer_name](emb, inp.state, inp.length))
    lstm_outs, next_states = zip(*out)
    # Concatenate output of forward and reverse LSTMs
    lstm_out = tf.concat(lstm_outs, 1)
    logits = self.layers['cl_logits'](lstm_out)
    f_inputs, _ = inputs  # pylint: disable=unpacking-non-sequence
    loss = layers_lib.classification_loss(logits, f_inputs.labels,
                                          f_inputs.weights)
    if return_intermediates:
      return lstm_out, next_states, logits, loss
    else:
      return loss
  def adversarial_loss(self):
    """Compute adversarial loss based on FLAGS.adv_training_method."""
    def random_perturbation_loss():
      return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'],
                                                    self.cl_inputs[0].length,
                                                    self.cl_loss_from_embedding)
    def adversarial_loss():
      return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'],
                                            self.tensors['cl_loss'],
                                            self.cl_loss_from_embedding)
    def virtual_adversarial_loss():
      """Computes virtual adversarial loss.
      Uses lm_inputs and constructs the language model graph if it hasn't yet
      been constructed.
      Also ensures that the LM input states are saved for LSTM state-saving
      BPTT.
      Returns:
        loss: float scalar.
      """
      if self.lm_inputs is None:
        self.language_model_graph(compute_loss=False)
      def logits_from_embedding(embedded, return_next_state=False):
        _, next_states, logits, _ = self.cl_loss_from_embedding(
            embedded, inputs=self.lm_inputs, return_intermediates=True)
        if return_next_state:
          return next_states, logits
        else:
          return logits
      lm_embedded = (self.tensors['lm_embedded'],
                     self.tensors['lm_embedded_reverse'])
      next_states, lm_cl_logits = logits_from_embedding(
          lm_embedded, return_next_state=True)
      va_loss = adv_lib.virtual_adversarial_loss_bidir(
          lm_cl_logits, lm_embedded, self.lm_inputs, logits_from_embedding)
      # Save both LM input streams' LSTM states when the loss is evaluated.
      saves = [
          inp.save_state(state)
          for (inp, state) in zip(self.lm_inputs, next_states)
      ]
      with tf.control_dependencies(saves):
        va_loss = tf.identity(va_loss)
      return va_loss
    def combo_loss():
      return adversarial_loss() + virtual_adversarial_loss()
    # Dispatch table keyed by FLAGS.adv_training_method; empty/None disables
    # adversarial regularization entirely.
    adv_training_methods = {
        # Random perturbation
        'rp': random_perturbation_loss,
        # Adversarial training
        'at': adversarial_loss,
        # Virtual adversarial training
        'vat': virtual_adversarial_loss,
        # Both at and vat
        'atvat': combo_loss,
        '': lambda: tf.constant(0.),
        None: lambda: tf.constant(0.),
    }
    with tf.name_scope('adversarial_loss'):
      return adv_training_methods[FLAGS.adv_training_method]()
def _inputs(dataset='train', pretrain=False, bidir=False):
  """Builds a VatxtInput pipeline for `dataset`, configured from FLAGS.
  Args:
    dataset: str, one of {'train', 'test', 'valid'}.
    pretrain: bool, whether inputs are for LM pretraining.
    bidir: bool, whether to produce forward and reverse input streams.
  Returns:
    Whatever inputs_lib.inputs returns (a VatxtInput, or a pair for bidir).
  """
  kwargs = dict(
      data_dir=FLAGS.data_dir,
      phase=dataset,
      bidir=bidir,
      pretrain=pretrain,
      # seq2seq pretraining only applies when pretraining at all.
      use_seq2seq=pretrain and FLAGS.use_seq2seq_autoencoder,
      state_size=FLAGS.rnn_cell_size,
      num_layers=FLAGS.rnn_num_layers,
      batch_size=FLAGS.batch_size,
      unroll_steps=FLAGS.num_timesteps,
      # By dataset convention the last vocab index is <eos>.
      eos_id=FLAGS.vocab_size - 1)
  return inputs_lib.inputs(**kwargs)
def _get_vocab_freqs():
  """Returns vocab frequencies.
  Reads FLAGS.vocab_freq_path (or data_dir/vocab_freq.txt) if present;
  otherwise falls back to uniform frequencies of 1.
  Returns:
    List of integers, length=FLAGS.vocab_size.
  Raises:
    ValueError: if the length of the frequency file is not equal to the vocab
      size, or if the file is not found.
  """
  path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir, 'vocab_freq.txt')
  if not tf.gfile.Exists(path):
    if FLAGS.vocab_freq_path:
      # An explicitly requested file that is missing is an error; the
      # implicit default location silently falls back to uniform freqs.
      raise ValueError('vocab_freq_path not found')
    return [1] * FLAGS.vocab_size
  with tf.gfile.Open(path) as f:
    # Frequency is the last column of each row.
    freqs = [int(row[-1]) for row in csv.reader(f, quoting=csv.QUOTE_NONE)]
  if len(freqs) != FLAGS.vocab_size:
    raise ValueError('Frequency file length %d != vocab size %d' %
                     (len(freqs), FLAGS.vocab_size))
  return freqs
def make_restore_average_vars_dict():
  """Returns dict mapping moving average names to variables."""
  ema = tf.train.ExponentialMovingAverage(0.999)
  # Hoist the trainable-variables lookup out of the loop.
  trainables = tf.trainable_variables()
  var_restore_dict = {}
  for v in tf.global_variables():
    # Trainable variables restore from their moving-average shadow name;
    # everything else restores under its own op name.
    name = ema.average_name(v) if v in trainables else v.op.name
    var_restore_dict[name] = v
  return var_restore_dict
def optimize(loss, global_step):
  """Builds the training op for `loss` using FLAGS hyperparameters."""
  # NOTE(review): FLAGS.task is not defined in this file; presumably it is
  # registered by a sibling module (e.g. the training driver) — verify.
  return layers_lib.optimize(
      loss,
      global_step,
      FLAGS.max_grad_norm,
      FLAGS.learning_rate,
      FLAGS.learning_rate_decay_factor,
      FLAGS.sync_replicas,
      FLAGS.replicas_to_aggregate,
      FLAGS.task)
| jiaphuan/models | research/adversarial_text/graphs.py | Python | apache-2.0 | 24,710 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 the Pockets team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""A pocket full of useful collection tools!"""
from __future__ import absolute_import, print_function
from collections import defaultdict
from inspect import isclass
try:
from collections.abc import Iterable, Mapping, Sized
except ImportError:
from collections import Iterable, Mapping, Sized
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import six
__all__ = [
"groupify",
"keydefaultdict",
"is_listy",
"listify",
"is_mappy",
"mappify",
"nesteddefaultdict",
"readable_join",
"uniquify",
]
def groupify(items, keys, val_key=None):
    """
    Groups a list of items into nested OrderedDicts based on the given keys.
    Note:
        On Python 2.6 the return value will use regular dicts instead of
        OrderedDicts.
    >>> from __future__ import print_function
    >>> from json import dumps
    >>>
    >>> ex = lambda x: print(dumps(x, indent=2, sort_keys=True, default=repr))
    >>>
    >>> class Reminder:
    ...     def __init__(self, when, where, what):
    ...         self.when = when
    ...         self.where = where
    ...         self.what = what
    ...     def __repr__(self):
    ...         return 'Reminder({0.when}, {0.where}, {0.what})'.format(self)
    ...
    >>> reminders = [
    ...     Reminder('Fri', 'Home', 'Eat cereal'),
    ...     Reminder('Fri', 'Work', 'Feed Ivan'),
    ...     Reminder('Sat', 'Home', 'Sleep in'),
    ...     Reminder('Sat', 'Home', 'Play Zelda'),
    ...     Reminder('Sun', 'Home', 'Sleep in'),
    ...     Reminder('Sun', 'Work', 'Reset database')]
    >>>
    >>> ex(groupify(reminders, 'when'))
    {
      "Fri": [
        "Reminder(Fri, Home, Eat cereal)",
        "Reminder(Fri, Work, Feed Ivan)"
      ],
      "Sat": [
        "Reminder(Sat, Home, Sleep in)",
        "Reminder(Sat, Home, Play Zelda)"
      ],
      "Sun": [
        "Reminder(Sun, Home, Sleep in)",
        "Reminder(Sun, Work, Reset database)"
      ]
    }
    >>>
    >>> ex(groupify(reminders, ['when', 'where']))
    {
      "Fri": {
        "Home": [
          "Reminder(Fri, Home, Eat cereal)"
        ],
        "Work": [
          "Reminder(Fri, Work, Feed Ivan)"
        ]
      },
      "Sat": {
        "Home": [
          "Reminder(Sat, Home, Sleep in)",
          "Reminder(Sat, Home, Play Zelda)"
        ]
      },
      "Sun": {
        "Home": [
          "Reminder(Sun, Home, Sleep in)"
        ],
        "Work": [
          "Reminder(Sun, Work, Reset database)"
        ]
      }
    }
    >>>
    >>> ex(groupify(reminders, ['when', 'where'], 'what'))
    {
      "Fri": {
        "Home": [
          "Eat cereal"
        ],
        "Work": [
          "Feed Ivan"
        ]
      },
      "Sat": {
        "Home": [
          "Sleep in",
          "Play Zelda"
        ]
      },
      "Sun": {
        "Home": [
          "Sleep in"
        ],
        "Work": [
          "Reset database"
        ]
      }
    }
    >>>
    >>> ex(groupify(reminders, lambda r: '{0.when} - {0.where}'.format(r), 'what'))
    {
      "Fri - Home": [
        "Eat cereal"
      ],
      "Fri - Work": [
        "Feed Ivan"
      ],
      "Sat - Home": [
        "Sleep in",
        "Play Zelda"
      ],
      "Sun - Home": [
        "Sleep in"
      ],
      "Sun - Work": [
        "Reset database"
      ]
    }
    Args:
        items (list): The list of items to arrange in groups.
        keys (str|callable|list): The key or keys that should be used to group
            `items`. If multiple keys are given, then each will correspond to
            an additional level of nesting in the order they are given.
        val_key (str|callable): A key or callable used to generate the leaf
            values in the nested OrderedDicts. If `val_key` is `None`, then
            the item itself is used. Defaults to `None`.
    Returns:
        OrderedDict: Nested OrderedDicts with `items` grouped by `keys`.
    """  # noqa: E501
    if not keys:
        return items
    keys = listify(keys)
    last_index = len(keys) - 1
    is_callable = callable(val_key)
    groupified = OrderedDict()
    for item in items:
        current = groupified
        for depth, key in enumerate(keys):
            attr = key(item) if callable(key) else getattr(item, key)
            if attr not in current:
                # Compare positions, not object identity: the previous
                # `key is last_key` check created a leaf list at a non-final
                # level whenever the same key object appeared more than once
                # in `keys` (e.g. duplicated interned strings), crashing on
                # the next nesting step.
                current[attr] = [] if depth == last_index else OrderedDict()
            current = current[attr]
        if val_key:
            value = val_key(item) if is_callable else getattr(item, val_key)
        else:
            value = item
        current.append(value)
    return groupified
class keydefaultdict(defaultdict):
    """
    A defaultdict variant whose factory receives the missing key.

    Unlike a plain `defaultdict`, the `default_factory` is called with the
    key that was not found, so the generated default can depend on it.

    >>> def echo_factory(missing_key):
    ...     return missing_key
    ...
    >>> d = keydefaultdict(echo_factory)
    >>> d['Hello World']
    'Hello World'
    >>> d['Hello World'] = 'Goodbye'
    >>> d['Hello World']
    'Goodbye'
    """

    def __missing__(self, key):
        # Mirror defaultdict semantics: no factory means a plain KeyError.
        factory = self.default_factory
        if factory is None:
            raise KeyError(key)
        # Generate, store, and return the default so later lookups hit it.
        value = factory(key)
        self[key] = value
        return value
def is_listy(x):
    """
    Return True if `x` is "listy", i.e. a list-like object.
    "Listy" is defined as a sized iterable which is neither a map nor a string:
    >>> is_listy(['a', 'b'])
    True
    >>> is_listy(set())
    True
    >>> is_listy(iter(['a', 'b']))
    False
    >>> is_listy({'a': 'b'})
    False
    >>> is_listy('a regular string')
    False
    Note:
        Iterables and generators fail the "listy" test because they
        are not sized.
    Args:
        x (any value): The object to test.
    Returns:
        bool: True if `x` is "listy", False otherwise.
    """
    # Maps, text strings, and byte strings are explicitly not listy.
    if isinstance(x, Mapping) or isinstance(x, six.string_types):
        return False
    if isinstance(x, type(b"")):
        return False
    # Everything else qualifies as long as it is both sized and iterable.
    return isinstance(x, Sized) and isinstance(x, Iterable)
def listify(x, minlen=0, default=None, cls=None):
    """
    Return a listified version of `x`.
    If `x` is a non-string iterable, it is wrapped in a list; otherwise
    a list is returned with `x` as its only element. If `x` is `None`, an
    empty list is returned.
    >>> listify('a regular string')
    ['a regular string']
    >>> listify(tuple(['a', 'b', 'c']))
    ['a', 'b', 'c']
    >>> listify({'a': 'A'})
    [{'a': 'A'}]
    >>> listify(None)
    []
    Note:
        Not guaranteed to return a copy of `x`. If `x` is already a list and
        `cls` is not specified, then `x` itself is returned.
    Args:
        x (any value): Value to listify.
        minlen (int): Minimum length of the returned list. If the returned
            list would be shorter than `minlen` it is padded with values from
            `default`. Defaults to 0.
            >>> listify([], minlen=0)
            []
            >>> listify([], minlen=1)
            [None]
            >>> listify('item', minlen=3)
            ['item', None, None]
        default (any value): Value that should be used to pad the list if it
            would be shorter than `minlen`:
            >>> listify([], minlen=1, default='PADDING')
            ['PADDING']
            >>> listify('item', minlen=3, default='PADDING')
            ['item', 'PADDING', 'PADDING']
        cls (class or callable): Instead of wrapping `x` in a list, wrap it
            in an instance of `cls`. `cls` should accept an iterable object
            as its single parameter when called:
            >>> from collections import deque
            >>> listify(['a', 'b', 'c'], cls=deque)
            deque(['a', 'b', 'c'])
    Returns:
        list or `cls`: A listified version of `x`.
    """
    if x is None:
        result = []
    elif isinstance(x, list):
        # Reuse (and potentially pad) the caller's list in place.
        result = x
    elif is_listy(x):
        result = list(x)
    else:
        result = [x]
    if minlen:
        shortfall = minlen - len(result)
        if shortfall > 0:
            result.extend([default] * shortfall)
    if cls and not (isclass(cls) and issubclass(type(result), cls)):
        result = cls(result)
    return result
def is_mappy(x):
    """
    Return True if `x` is "mappy", i.e. a map-like object.
    An object counts as "mappy" exactly when it is an instance of
    `collections.Mapping` — plain dicts, `defaultdict`s, `OrderedDict`s, etc.
    >>> is_mappy({'a': 'b'})
    True
    >>> from collections import defaultdict
    >>> is_mappy(defaultdict(list))
    True
    >>> is_mappy('a regular string')
    False
    >>> is_mappy(['a', 'b'])
    False
    >>> is_listy(iter({'a': 'b'}))
    False
    Note:
        Iterables and generators fail the "mappy" test.
    Args:
        x (any value): The object to test.
    Returns:
        bool: True if `x` is "mappy", False otherwise.
    """
    return isinstance(x, Mapping)
def mappify(x, default=True, cls=None):
    """
    Return a mappified version of `x`.
    If `x` is a string, it becomes the only key of the returned dict. If `x`
    is a non-string iterable, the elements of `x` become keys in the returned
    dict. The values of the returned dict are set to `default`. If `x` is
    `None`, an empty dict is returned.
    If `x` is a map, it is returned directly.
    >>> mappify('a regular string')
    {'a regular string': True}
    >>> mappify(['a'])
    {'a': True}
    >>> mappify({'a': 'A'})
    {'a': 'A'}
    >>> mappify(None)
    {}
    Note:
        Not guaranteed to return a copy of `x`. If `x` is already a map and
        `cls` is not specified, then `x` itself is returned.
    Args:
        x (str, map, or iterable): Value to mappify.
        default (any value): Value used to fill out missing values of the
            returned dict.
        cls (class or callable): Instead of wrapping `x` in a dict, wrap it
            in an instance of `cls`. `cls` should accept a map object as
            its single parameter when called:
            >>> from collections import defaultdict
            >>> mappify('a', cls=lambda x: defaultdict(None, x))
            defaultdict(None, {'a': True})
    Returns:
        dict or `cls`: A mappified version of `x`.
    Raises:
        TypeError: If `x` is not a map, iterable, or string.
    """
    if x is None:
        result = {}
    elif isinstance(x, Mapping):
        result = x
    elif isinstance(x, six.string_types):
        result = {x: default}
    elif isinstance(x, Iterable):
        # If cls is specified, attempt to preserve the order of x, in
        # case cls is also a class that preserves order.
        pairs = [(key, default) for key in x]
        result = OrderedDict(pairs) if cls else dict(pairs)
    else:
        raise TypeError(
            "Unable to mappify non-mappy {0}".format(type(x)), x
        )
    if cls and not (isclass(cls) and issubclass(type(result), cls)):
        result = cls(result)
    return result
def nesteddefaultdict():
    """
    Build a defaultdict whose missing values are nested defaultdicts.
    Every level of the returned structure shares the same behavior, so
    arbitrarily deep keys can be assigned without pre-creating the
    intermediate dicts.
    >>> nested = nesteddefaultdict()
    >>> nested_child = nested['New Key 1']
    >>> nested_child
    defaultdict(...)
    >>> nested_grandchild = nested_child['New Key 2']
    >>> nested_grandchild
    defaultdict(...)
    """
    # The factory is this function itself, which makes the nesting recursive.
    return defaultdict(nesteddefaultdict)
def readable_join(xs, conjunction="and", sep=","):
    """
    Accepts a list of strings and separates them with commas as grammatically
    appropriate with a conjunction before the final entry. Any input strings
    containing only whitespace will not be included in the result.
    >>> readable_join(['foo'])
    'foo'
    >>> readable_join(['foo', 'bar'])
    'foo and bar'
    >>> readable_join(['foo', 'bar', 'baz'])
    'foo, bar, and baz'
    >>> readable_join(['foo', ' ', '', 'bar', '', ' ', 'baz'])
    'foo, bar, and baz'
    >>> readable_join(['foo', 'bar', 'baz'], 'or')
    'foo, bar, or baz'
    >>> readable_join(['foo', 'bar', 'baz'], 'but never')
    'foo, bar, but never baz'
    """
    # Normalize to stripped strings and drop any that are empty.
    words = [w for w in (str(item).strip() for item in listify(xs)) if w]
    if len(words) > 1:
        # Prefix the conjunction onto the final word.
        words = words[:-1] + [conjunction + " " + words[-1]]
    # Oxford comma only when three or more words remain.
    joiner = sep + " " if len(words) > 2 else " "
    return joiner.join(words)
def uniquify(x, key=lambda o: o, cls=None):
    """
    Returns an order-preserved copy of `x` with duplicate items removed.
    >>> uniquify(['a', 'z', 'a', 'b', 'a', 'y', 'a', 'c', 'a', 'x'])
    ['a', 'z', 'b', 'y', 'c', 'x']
    Args:
        x (Sequence): Sequence to uniquify.
        key (str or callable): Similar to `sorted`, specifies an attribute or
            function of one argument that is used to extract a comparison key
            from each list element: key=str.lower. By default, compares the
            elements directly.
            >>> strings = ['ASDF', 'asdf', 'ZXCV', 'zxcv']
            >>> uniquify(strings, key=str.lower)
            ['ASDF', 'ZXCV']
        cls (class or callable): Instead of wrapping `x` in a list, wrap it
            in an instance of `cls`. `cls` should accept an iterable object
            as its single parameter when called:
            >>> from collections import deque
            >>> listify(['a', 'b', 'c'], cls=deque)
            deque(['a', 'b', 'c'])
    Returns:
        list: An order-preserved copy of `x` with duplicate items removed.
    Raises:
        TypeError: If `x` is not "listy".
    """
    if not is_listy(x):
        raise TypeError("Unable to uniquify non-listy {0}".format(type(x)), x)
    seen = set()
    result = []
    for item in x:
        # `key` may be an attribute name or a callable, as with `sorted`.
        k = key(item) if callable(key) else getattr(item, key)
        if k not in seen:
            seen.add(k)
            result.append(item)
    if cls and not (isclass(cls) and issubclass(type(result), cls)):
        result = cls(result)
    return result
| smmribeiro/intellij-community | python/helpers/pockets/collections.py | Python | apache-2.0 | 13,770 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import netaddr
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.api import base_security_groups as sec_base
from neutron.tests.tempest.common import custom_matchers
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class PortsTestJSON(sec_base.BaseSecGroupTest):
    """
    Test the following operations for ports:
        port create
        port delete
        port list
        port show
        port update
    """
    @classmethod
    def resource_setup(cls):
        # Shared fixtures for the whole class: one network with one port,
        # reused by the read-only tests (show/list) below.
        super(PortsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.port = cls.create_port(cls.network)
    def _delete_port(self, port_id):
        # Delete the port, then verify it is gone from the port listing.
        self.client.delete_port(port_id)
        body = self.client.list_ports()
        ports_list = body['ports']
        self.assertNotIn(port_id, [n['id'] for n in ports_list])
    @test.attr(type='smoke')
    @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
    def test_create_update_delete_port(self):
        # Verify port creation
        body = self.client.create_port(network_id=self.network['id'])
        port = body['port']
        # Schedule port deletion with verification upon test completion
        self.addCleanup(self._delete_port, port['id'])
        self.assertTrue(port['admin_state_up'])
        # Verify port update
        new_name = "New_Port"
        body = self.client.update_port(port['id'],
                                       name=new_name,
                                       admin_state_up=False)
        updated_port = body['port']
        self.assertEqual(updated_port['name'], new_name)
        self.assertFalse(updated_port['admin_state_up'])
    @test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
    def test_create_bulk_port(self):
        # Create two ports on two different networks in a single bulk call
        # and verify each lands on the intended network.
        network1 = self.network
        name = data_utils.rand_name('network-')
        network2 = self.create_network(network_name=name)
        network_list = [network1['id'], network2['id']]
        port_list = [{'network_id': net_id} for net_id in network_list]
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        port1 = created_ports[0]
        port2 = created_ports[1]
        self.addCleanup(self._delete_port, port1['id'])
        self.addCleanup(self._delete_port, port2['id'])
        self.assertEqual(port1['network_id'], network1['id'])
        self.assertEqual(port2['network_id'], network2['id'])
        self.assertTrue(port1['admin_state_up'])
        self.assertTrue(port2['admin_state_up'])
    @classmethod
    def _get_ipaddress_from_tempest_conf(cls):
        """Return first subnet gateway for configured CIDR """
        # NOTE(review): cidr is only assigned for _ip_version 4 or 6; any
        # other value would raise NameError below. netaddr.IPAddress(cidr)
        # presumably yields the network's base address — confirm against
        # the netaddr API if this is changed.
        if cls._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        elif cls._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        return netaddr.IPAddress(cidr)
    @test.attr(type='smoke')
    @test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
    def test_create_port_in_allowed_allocation_pools(self):
        # A port created without an explicit fixed IP must get an address
        # from inside the subnet's allocation pool.
        network = self.create_network()
        net_id = network['id']
        address = self._get_ipaddress_from_tempest_conf()
        allocation_pools = {'allocation_pools': [{'start': str(address + 4),
                                                  'end': str(address + 6)}]}
        subnet = self.create_subnet(network, **allocation_pools)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        body = self.client.create_port(network_id=net_id)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        ip_address = port['fixed_ips'][0]['ip_address']
        start_ip_address = allocation_pools['allocation_pools'][0]['start']
        end_ip_address = allocation_pools['allocation_pools'][0]['end']
        ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
        self.assertIn(ip_address, ip_range)
    @test.attr(type='smoke')
    @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
    def test_show_port(self):
        # Verify the details of port
        body = self.client.show_port(self.port['id'])
        port = body['port']
        self.assertIn('id', port)
        # TODO(Santosh)- This is a temporary workaround to compare create_port
        # and show_port dict elements.Remove this once extra_dhcp_opts issue
        # gets fixed in neutron.( bug - 1365341.)
        self.assertThat(self.port,
                        custom_matchers.MatchesDictExceptForKeys
                        (port, excluded_keys=['extra_dhcp_opts']))
    @test.attr(type='smoke')
    @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
    def test_show_port_fields(self):
        # Verify specific fields of a port
        fields = ['id', 'mac_address']
        body = self.client.show_port(self.port['id'],
                                     fields=fields)
        port = body['port']
        # The response must contain exactly the requested fields, no more.
        self.assertEqual(sorted(port.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(port[field_name], self.port[field_name])
    @test.attr(type='smoke')
    @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
    def test_list_ports(self):
        # Verify the port exists in the list of all ports
        body = self.client.list_ports()
        ports = [port['id'] for port in body['ports']
                 if port['id'] == self.port['id']]
        self.assertNotEmpty(ports, "Created port not found in the list")
    @test.attr(type='smoke')
    @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
    def test_port_list_filter_by_router_id(self):
        # Create a router
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        router = self.create_router(data_utils.rand_name('router-'))
        self.addCleanup(self.client.delete_router, router['id'])
        port = self.client.create_port(network_id=network['id'])
        # Add router interface to port created above
        self.client.add_router_interface_with_port_id(
            router['id'], port['port']['id'])
        self.addCleanup(self.client.remove_router_interface_with_port_id,
                        router['id'], port['port']['id'])
        # List ports filtered by router_id
        port_list = self.client.list_ports(device_id=router['id'])
        ports = port_list['ports']
        # Exactly one port should be attached to the router.
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['id'], port['port']['id'])
        self.assertEqual(ports[0]['device_id'], router['id'])
    @test.attr(type='smoke')
    @test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
    def test_list_ports_fields(self):
        # Verify specific fields of ports
        fields = ['id', 'mac_address']
        body = self.client.list_ports(fields=fields)
        ports = body['ports']
        self.assertNotEmpty(ports, "Port list returned is empty")
        # Asserting the fields returned are correct
        for port in ports:
            self.assertEqual(sorted(fields), sorted(port.keys()))
    @test.attr(type='smoke')
    @test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
    def test_create_update_port_with_second_ip(self):
        # Create a network with two subnets
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet_1 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        subnet_2 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        fixed_ips = fixed_ip_1 + fixed_ip_2
        # Create a port with multiple IP addresses
        port = self.create_port(network,
                                fixed_ips=fixed_ips)
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertEqual(2, len(port['fixed_ips']))
        check_fixed_ips = [subnet_1['id'], subnet_2['id']]
        for item in port['fixed_ips']:
            self.assertIn(item['subnet_id'], check_fixed_ips)
        # Update the port to return to a single IP address
        port = self.update_port(port, fixed_ips=fixed_ip_1)
        self.assertEqual(1, len(port['fixed_ips']))
        # Update the port with a second IP address from second subnet
        port = self.update_port(port, fixed_ips=fixed_ips)
        self.assertEqual(2, len(port['fixed_ips']))
    def _update_port_with_security_groups(self, security_groups_names):
        # Helper: create a port with one security group, then update it to
        # the groups named in `security_groups_names` along with new name,
        # admin state and fixed IP, and verify every updated attribute.
        subnet_1 = self.create_subnet(self.network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        security_groups_list = list()
        for name in security_groups_names:
            group_create_body = self.client.create_security_group(
                name=name)
            self.addCleanup(self.client.delete_security_group,
                            group_create_body['security_group']['id'])
            security_groups_list.append(group_create_body['security_group']
                                        ['id'])
        # Create a port
        sec_grp_name = data_utils.rand_name('secgroup')
        security_group = self.client.create_security_group(name=sec_grp_name)
        self.addCleanup(self.client.delete_security_group,
                        security_group['security_group']['id'])
        post_body = {
            "name": data_utils.rand_name('port-'),
            "security_groups": [security_group['security_group']['id']],
            "network_id": self.network['id'],
            "admin_state_up": True,
            "fixed_ips": fixed_ip_1}
        body = self.client.create_port(**post_body)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        # Update the port with security groups
        subnet_2 = self.create_subnet(self.network)
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        update_body = {"name": data_utils.rand_name('port-'),
                       "admin_state_up": False,
                       "fixed_ips": fixed_ip_2,
                       "security_groups": security_groups_list}
        body = self.client.update_port(port['id'], **update_body)
        port_show = body['port']
        # Verify the security groups and other attributes updated to port
        # fixed_ips and security_groups need dedicated checks below, so
        # they are excluded from the generic dict comparison.
        exclude_keys = set(port_show).symmetric_difference(update_body)
        exclude_keys.add('fixed_ips')
        exclude_keys.add('security_groups')
        self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
            update_body, exclude_keys))
        self.assertEqual(fixed_ip_2[0]['subnet_id'],
                         port_show['fixed_ips'][0]['subnet_id'])
        for security_group in security_groups_list:
            self.assertIn(security_group, port_show['security_groups'])
    @test.attr(type='smoke')
    @test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
    def test_update_port_with_security_group_and_extra_attributes(self):
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup')])
    @test.attr(type='smoke')
    @test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
    def test_update_port_with_two_security_groups_and_extra_attributes(self):
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup'),
             data_utils.rand_name('secgroup')])
    @test.attr(type='smoke')
    @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
    def test_create_show_delete_port_user_defined_mac(self):
        # Create a port for a legal mac
        body = self.client.create_port(network_id=self.network['id'])
        old_port = body['port']
        free_mac_address = old_port['mac_address']
        # Deleting the port frees its MAC address for reuse below.
        self.client.delete_port(old_port['id'])
        # Create a new port with user defined mac
        body = self.client.create_port(network_id=self.network['id'],
                                       mac_address=free_mac_address)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        body = self.client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(free_mac_address,
                         show_port['mac_address'])
    @test.attr(type='smoke')
    @test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
    def test_create_port_with_no_securitygroups(self):
        # A port created with an explicit empty security-group list must
        # end up with an empty (but present) security_groups attribute.
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        port = self.create_port(network, security_groups=[])
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertIsNotNone(port['security_groups'])
        self.assertEmpty(port['security_groups'])
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
    """Exercise the admin-only port binding extension attributes
    (binding:host_id, binding:vif_type, binding:vif_details)."""
    @classmethod
    def resource_setup(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
        cls.identity_client = cls._get_identity_admin_client()
        cls.tenant = cls.identity_client.get_tenant_by_name(
            CONF.identity.tenant_name)
        cls.network = cls.create_network()
        # Use this test host's name as the binding host for all tests.
        cls.host_id = socket.gethostname()
    @test.attr(type='smoke')
    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
    def test_create_port_binding_ext_attr(self):
        # binding:host_id can be set at creation time by an admin.
        post_body = {"network_id": self.network['id'],
                     "binding:host_id": self.host_id}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        host_id = port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)
    @test.attr(type='smoke')
    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
    def test_update_port_binding_ext_attr(self):
        # binding:host_id can also be set after creation via update.
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        update_body = {"binding:host_id": self.host_id}
        body = self.admin_client.update_port(port['id'], **update_body)
        updated_port = body['port']
        host_id = updated_port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)
    @test.attr(type='smoke')
    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
    def test_list_ports_binding_ext_attr(self):
        # Create a new port
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        # Update the port's binding attributes so that is now 'bound'
        # to a host
        update_body = {"binding:host_id": self.host_id}
        self.admin_client.update_port(port['id'], **update_body)
        # List all ports, ensure new port is part of list and its binding
        # attributes are set and accurate
        body = self.admin_client.list_ports()
        ports_list = body['ports']
        pids_list = [p['id'] for p in ports_list]
        self.assertIn(port['id'], pids_list)
        listed_port = [p for p in ports_list if p['id'] == port['id']]
        self.assertEqual(1, len(listed_port),
                         'Multiple ports listed with id %s in ports listing: '
                         '%s' % (port['id'], ports_list))
        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
    @test.attr(type='smoke')
    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
    def test_show_port_binding_ext_attr(self):
        # show_port must return the same binding attributes as create_port.
        body = self.admin_client.create_port(network_id=self.network['id'])
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        body = self.admin_client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(port['binding:host_id'],
                         show_port['binding:host_id'])
        self.assertEqual(port['binding:vif_type'],
                         show_port['binding:vif_type'])
        self.assertEqual(port['binding:vif_details'],
                         show_port['binding:vif_details'])
class PortsIpV6TestJSON(PortsTestJSON):
    # Re-run the full port test suite against IPv6 tenant networks.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
    # Re-run the admin binding-attribute tests against IPv6 tenant networks.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
| yanheven/neutron | neutron/tests/api/test_ports.py | Python | apache-2.0 | 17,851 |
# -*- coding: utf-8 -*-
"""Deletes all feedback that have a description that's an empty
string.
"""
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def delete_feedback(apps, schema_editor):
    """Delete all feedback responses that have an empty description.

    :arg apps: migration app registry, used to look up the historical
        ``Response`` model
    :arg schema_editor: unused; required by the RunPython signature

    """
    Response = apps.get_model('feedback', 'Response')
    qs = Response.objects.filter(description='')
    count = qs.count()
    if count > 0:
        qs.delete()
        if 'test' not in sys.argv:
            # Use the function-call form of print: the statement form
            # (``print 'x'``) is a SyntaxError on Python 3, while for a
            # single argument the call form behaves identically on
            # Python 2.
            print('Deleted {0} responses'.format(count))
def noop(apps, schema_editor):
    """Reverse migration: nothing to do, deleted rows cannot be restored."""
    pass
class Migration(migrations.Migration):
    """Data migration: purge feedback responses with empty descriptions."""
    dependencies = [
        ('feedback', '0002_make_products'),
    ]
    operations = [
        # Forward deletes empty responses; reverse is a no-op.
        migrations.RunPython(delete_feedback, noop)
    ]
| staranjeet/fjord | fjord/feedback/migrations/0003_delete_empty_desc_feedback.py | Python | bsd-3-clause | 783 |
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
    """A Request that serializes ``formdata`` into the body (POST) or the
    query string (any other method)."""
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as Request plus ``formdata``: a dict
        or iterable of (key, value) pairs to URL-encode."""
        formdata = kwargs.pop('formdata', None)
        # Supplying form data implies POST unless the caller set a method.
        if formdata and kwargs.get('method') is None:
            kwargs['method'] = 'POST'
        super(FormRequest, self).__init__(*args, **kwargs)
        if formdata:
            items = formdata.items() if isinstance(formdata, dict) else formdata
            querystr = _urlencode(items, self.encoding)
            if self.method == 'POST':
                self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
                self._set_body(querystr)
            else:
                # Non-POST: append the encoded data to the URL's query string.
                self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
    @classmethod
    def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
                      clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
        """Build a FormRequest pre-filled from a form found in *response*.
        The form can be located by name, id, index, XPath or CSS selector;
        values in ``formdata`` override the form's own field values."""
        kwargs.setdefault('encoding', response.encoding)
        if formcss is not None:
            # Translate the CSS selector to XPath and reuse the XPath path.
            from parsel.csstranslator import HTMLTranslator
            formxpath = HTMLTranslator().css_to_xpath(formcss)
        form = _get_form(response, formname, formid, formnumber, formxpath)
        formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
        url = _get_form_url(form, kwargs.pop('url', None))
        method = kwargs.pop('method', form.method)
        return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
    """URL-encode (key, value) pairs, expanding list-like values into
    repeated keys; both keys and values are encoded as bytes using *enc*."""
    pairs = []
    for key, value in seq:
        candidates = value if is_listlike(value) else [value]
        for item in candidates:
            pairs.append((to_bytes(key, enc), to_bytes(item, enc)))
    return urlencode(pairs, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
    """Locate and return the lxml <form> element in *response*.

    Lookup precedence: ``formname``, then ``formid``, then ``formxpath``
    (climbing to the nearest enclosing <form> if the matched node is not
    itself a form), and finally the ``formnumber``-th form in the page.
    Raises ValueError/IndexError when nothing matches.
    """
    root = create_root_node(response.text, lxml.html.HTMLParser,
                            base_url=get_base_url(response))
    forms = root.xpath('//form')
    if not forms:
        raise ValueError("No <form> element found in %s" % response)
    if formname is not None:
        f = root.xpath('//form[@name="%s"]' % formname)
        if f:
            return f[0]
    if formid is not None:
        f = root.xpath('//form[@id="%s"]' % formid)
        if f:
            return f[0]
    # Get form element from xpath, if not found, go up
    if formxpath is not None:
        nodes = root.xpath(formxpath)
        if nodes:
            el = nodes[0]
            while True:
                if el.tag == 'form':
                    return el
                el = el.getparent()
                if el is None:
                    break
        # NOTE: on Python 2 the xpath is escaped for a printable message.
        encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
        raise ValueError('No <form> element found with %s' % encoded)
    # If we get here, it means that either formname was None
    # or invalid
    if formnumber is not None:
        try:
            form = forms[formnumber]
        except IndexError:
            raise IndexError("Form number %d not found in %s" %
                             (formnumber, response))
        else:
            return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
    """Collect the (name, value) pairs a browser would submit for *form*.

    Values from *formdata* override the form's own fields; submit/image/
    reset inputs and unchecked checkboxes/radios are excluded. Unless
    *dont_click* is set, the clickable element selected via *clickdata*
    (or the first one found) is appended as well.
    """
    try:
        formdata_keys = dict(formdata or ()).keys()
    except (ValueError, TypeError):
        raise ValueError('formdata should be a dict or iterable of tuples')
    if not formdata:
        formdata = ()
    # Select all form fields except submit-like inputs and any
    # checkbox/radio that is not checked.
    inputs = form.xpath('descendant::textarea'
                        '|descendant::select'
                        '|descendant::input[not(@type) or @type['
                        ' not(re:test(., "^(?:submit|image|reset)$", "i"))'
                        ' and (../@checked or'
                        '  not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
                        namespaces={
                            "re": "http://exslt.org/regular-expressions"})
    values = [(k, u'' if v is None else v)
              for k, v in (_value(e) for e in inputs)
              if k and k not in formdata_keys]
    if not dont_click:
        clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and not clickable[0] is None:
            values.append(clickable)
    if isinstance(formdata, dict):
        formdata = formdata.items()
    # Overrides come last; pairs with a None value are dropped entirely.
    values.extend((k, v) for k, v in formdata if v is not None)
    return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
# And for select tags wihout options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
# This is a workround to bug in lxml fixed 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
    """
    Returns the clickable element specified in clickdata,
    if the latter is given. If not, it returns the first
    clickable element found
    """
    # Clickable = submit/image inputs plus buttons that submit the form.
    clickables = [
        el for el in form.xpath(
            'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
            '|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
            namespaces={"re": "http://exslt.org/regular-expressions"})
    ]
    if not clickables:
        return
    # If we don't have clickdata, we just use the first clickable element
    if clickdata is None:
        el = clickables[0]
        return (el.get('name'), el.get('value') or '')
    # If clickdata is given, we compare it to the clickable elements to find a
    # match. We first look to see if the number is specified in clickdata,
    # because that uniquely identifies the element
    nr = clickdata.get('nr', None)
    if nr is not None:
        try:
            # NOTE: 'nr' indexes into ALL form inputs, not just clickables.
            el = list(form.inputs)[nr]
        except IndexError:
            pass
        else:
            return (el.get('name'), el.get('value') or '')
    # We didn't find it, so now we build an XPath expression out of the other
    # arguments, because they can be used as such
    xpath = u'.//*' + \
            u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
    el = form.xpath(xpath)
    if len(el) == 1:
        return (el[0].get('name'), el[0].get('value') or '')
    elif len(el) > 1:
        raise ValueError("Multiple elements found (%r) matching the criteria "
                         "in clickdata: %r" % (el, clickdata))
    else:
        raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
| Ryezhang/scrapy | scrapy/http/request/form.py | Python | bsd-3-clause | 7,658 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class BaseError(Exception):
  """Base error for all test runner errors."""

  def __init__(self, message, is_infra_error=False):
    """Initializes the error.

    Args:
      message: A string describing the error.
      is_infra_error: Whether the error was caused by an infrastructure
        issue rather than by the test itself.
    """
    super(BaseError, self).__init__(message)
    self._is_infra_error = is_infra_error
    self.message = message

  def __eq__(self, other):
    # Returning NotImplemented for foreign types lets Python fall back to
    # the other operand's __eq__ (and ultimately to False) instead of
    # raising AttributeError on the attribute accesses below.
    if not isinstance(other, BaseError):
      return NotImplemented
    return (self.message == other.message
            and self.is_infra_error == other.is_infra_error)

  def __ne__(self, other):
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result

  def __hash__(self):
    # Defining __eq__ without __hash__ would make instances unhashable on
    # Python 3; hash on the same state __eq__ compares.
    return hash((self.message, self._is_infra_error))

  @property
  def is_infra_error(self):
    """Property to indicate if error was caused by an infrastructure issue."""
    return self._is_infra_error
| catapult-project/catapult | devil/devil/base_error.py | Python | bsd-3-clause | 747 |
import unittest
import os
import numpy
from rmgpy.tools.canteraModel import findIgnitionDelay, CanteraCondition, Cantera
from rmgpy.quantity import Quantity
import rmgpy
class CanteraTest(unittest.TestCase):
    """Unit tests for helpers in rmgpy.tools.canteraModel that do not
    require loading a chemical mechanism."""
    def testIgnitionDelay(self):
        """
        Test that findIgnitionDelay() works.
        """
        t = numpy.arange(0,5,0.5)
        P = numpy.array([0,0.33,0.5,0.9,2,4,15,16,16.1,16.2])
        OH = numpy.array([0,0.33,0.5,0.9,2,4,15,16,7,2])
        CO = OH*0.9
        # Default metric (no third argument) — presumably based on the
        # steepest rise of the pressure trace; confirm in findIgnitionDelay.
        t_ign = findIgnitionDelay(t,P)
        self.assertEqual(t_ign,2.75)
        # Metric: time at half the species' maximum concentration.
        t_ign = findIgnitionDelay(t,OH,'maxHalfConcentration')
        self.assertEqual(t_ign,3)
        # Metric: time of the species' maximum concentration (multi-species).
        t_ign = findIgnitionDelay(t,[OH,CO], 'maxSpeciesConcentrations')
        self.assertEqual(t_ign,3.5)
    def testRepr(self):
        """
        Test that the repr function for a CanteraCondition object can reconstitute
        the same object
        """
        reactorType='IdealGasReactor'
        molFrac={'CC': 0.05, '[Ar]': 0.95}
        P=(3,'atm')
        T=(1500,'K')
        terminationTime=(5e-5,'s')
        condition = CanteraCondition(reactorType,
                                     terminationTime,
                                     molFrac,
                                     T0=T,
                                     P0=P)
        # Round-trip through repr() and eval(); the rebuilt object must
        # carry the same quantities (compared in SI units).
        reprCondition=eval(condition.__repr__())
        self.assertEqual(reprCondition.T0.value_si,Quantity(T).value_si)
        self.assertEqual(reprCondition.P0.value_si,Quantity(P).value_si)
        self.assertEqual(reprCondition.V0,None)
        self.assertEqual(reprCondition.molFrac,molFrac)
class RMGToCanteraTest(unittest.TestCase):
    """
    Contains unit tests for the conversion of RMG species and reaction objects to Cantera objects.
    """
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        from rmgpy.chemkin import loadChemkinFile
        folder = os.path.join(os.path.dirname(rmgpy.__file__),'tools/data/various_kinetics')
        chemkinPath = os.path.join(folder, 'chem_annotated.inp')
        dictionaryPath = os.path.join(folder, 'species_dictionary.txt')
        transportPath = os.path.join(folder, 'tran.dat')
        species, reactions = loadChemkinFile(chemkinPath, dictionaryPath,transportPath)
        # Convert RMG objects directly to Cantera objects...
        self.rmg_ctSpecies = [spec.toCantera() for spec in species]
        self.rmg_ctReactions = []
        for rxn in reactions:
            # toCantera may return one reaction or a list (e.g. duplicates).
            convertedReactions = rxn.toCantera(species)
            if isinstance(convertedReactions,list):
                self.rmg_ctReactions.extend(convertedReactions)
            else:
                self.rmg_ctReactions.append(convertedReactions)
        # ...and load the same mechanism through Cantera for comparison.
        job = Cantera()
        job.loadChemkinModel(chemkinPath, transportFile=transportPath,quiet=True)
        self.ctSpecies = job.model.species()
        self.ctReactions = job.model.reactions()
    def testSpeciesConversion(self):
        """
        Test that species objects convert properly
        """
        from rmgpy.tools.canteraModel import checkEquivalentCanteraSpecies
        for i in range(len(self.ctSpecies)):
            self.assertTrue(checkEquivalentCanteraSpecies(self.ctSpecies[i],self.rmg_ctSpecies[i]))
    def testReactionConversion(self):
        """
        Test that reaction objects convert properly
        """
        from rmgpy.tools.canteraModel import checkEquivalentCanteraReaction
        for i in range(len(self.ctReactions)):
            self.assertTrue(checkEquivalentCanteraReaction(self.ctReactions[i],self.rmg_ctReactions[i]))
| chatelak/RMG-Py | rmgpy/tools/canteraTest.py | Python | mit | 3,597 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 19:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Schema migration: add the per-user, per-store score table
    ``pootle_user_store_score`` with a uniqueness constraint on
    (date, store, user)."""
    dependencies = [
        ('pootle_store', '0045_remove_suggestion_tmp_state'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pootle_score', '0002_set_user_scores'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserStoreScore',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(db_index=True)),
                ('score', models.FloatField(db_index=True)),
                ('reviewed', models.IntegerField(db_index=True, default=0)),
                ('suggested', models.IntegerField(db_index=True, default=0)),
                ('translated', models.IntegerField(db_index=True, default=0)),
                ('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_scores', to='pootle_store.Store')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='store_scores', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'db_table': 'pootle_user_store_score',
            },
        ),
        # One score row per user per store per day.
        migrations.AlterUniqueTogether(
            name='userstorescore',
            unique_together=set([('date', 'store', 'user')]),
        ),
    ]
| claudep/pootle | pootle/apps/pootle_score/migrations/0003_add_pootle_user_store_score.py | Python | gpl-3.0 | 1,645 |
from __future__ import absolute_import
import json
import logging
from pip._vendor import six
from pip._vendor.six.moves import zip_longest
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.exceptions import CommandError
from pip._internal.index import PackageFinder
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.packaging import get_installer
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
    def __init__(self, *args, **kw):
        """Register all command-line options understood by `pip list`."""
        super(ListCommand, self).__init__(*args, **kw)
        # NOTE: cmd_opts aliases self.cmd_opts; both names below add to the
        # same option group.
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        cmd_opts.add_option(
            '--format',
            action='store',
            dest='list_format',
            default="columns",
            choices=('columns', 'freeze', 'json'),
            help="Select the output format among: columns (default), freeze, "
                 "or json",
        )
        cmd_opts.add_option(
            '--not-required',
            action='store_true',
            dest='not_required',
            help="List packages that are not dependencies of "
                 "installed packages.",
        )
        # --exclude-editable/--include-editable toggle the same flag;
        # editables are included by default.
        cmd_opts.add_option(
            '--exclude-editable',
            action='store_false',
            dest='include_editable',
            help='Exclude editable package from output.',
        )
        cmd_opts.add_option(
            '--include-editable',
            action='store_true',
            dest='include_editable',
            help='Include editable package from output.',
            default=True,
        )
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group, self.parser
        )
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
session=session,
)
    def run(self, options, args):
        """Entry point: validate option combinations, collect the installed
        distributions, apply the requested filters in order, and print."""
        if options.outdated and options.uptodate:
            raise CommandError(
                "Options --outdated and --uptodate cannot be combined.")
        packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=options.editable,
            include_editables=options.include_editable,
        )
        # get_not_required must be called firstly in order to find and
        # filter out all dependencies correctly. Otherwise a package
        # can't be identified as requirement because some parent packages
        # could be filtered out before.
        if options.not_required:
            packages = self.get_not_required(packages, options)
        if options.outdated:
            packages = self.get_outdated(packages, options)
        elif options.uptodate:
            packages = self.get_uptodate(packages, options)
        self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return {pkg for pkg in packages if pkg.key not in dep_keys}
    def iter_packages_latest_infos(self, packages, options):
        """Yield each dist annotated with ``latest_version``/``latest_filetype``.

        Distributions for which no candidate can be found on the configured
        indexes are silently skipped, so the output may be shorter than the
        input.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            for dist in packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if not options.pre:
                    # Remove prereleases
                    all_candidates = [candidate for candidate in all_candidates
                                      if not candidate.version.is_prerelease]
                if not all_candidates:
                    continue
                # NOTE(review): depends on PackageFinder's private sort key;
                # keep in sync with the finder's candidate ordering.
                best_candidate = max(all_candidates,
                                     key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                # This is dirty but makes the rest of the code much cleaner
                dist.latest_version = remote_version
                dist.latest_filetype = typ
                yield dist
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
if options.verbose >= 1:
logger.info("%s==%s (%s)", dist.project_name,
dist.version, dist.location)
else:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
    """Render ``vals`` (a list of rows) as space-padded columns.

    Returns ``(lines, sizes)``: the formatted rows and the computed width of
    every column. Missing trailing cells render as empty strings.

    From pfmoore on GitHub:
    https://github.com/pypa/pip/issues/3651#issuecomment-216932564
    """
    assert len(vals) > 0
    column_count = max(len(row) for row in vals)
    sizes = [0] * column_count
    for row in vals:
        sizes = [max(width, len(str(cell)))
                 for width, cell in zip_longest(sizes, row)]
    lines = [
        " ".join(str(cell).ljust(width) if cell is not None else ''
                 for width, cell in zip_longest(sizes, row))
        for row in vals
    ]
    return lines, sizes
def format_for_columns(pkgs, options):
    """
    Convert the package data into something usable
    by output_package_listing_columns.
    """
    running_outdated = options.outdated
    header = ["Package", "Version"]
    # Adjust the header for the `pip list --outdated` case.
    if running_outdated:
        header += ["Latest", "Type"]
    verbose = options.verbose >= 1
    if verbose or any(dist_is_editable(x) for x in pkgs):
        header.append("Location")
    if verbose:
        header.append("Installer")
    data = []
    for proj in pkgs:
        row = [proj.project_name, proj.version]
        if running_outdated:
            # For --outdated, separate out the latest_version and type.
            row.extend([proj.latest_version, proj.latest_filetype])
        if verbose or dist_is_editable(proj):
            row.append(proj.location)
        if verbose:
            row.append(get_installer(proj))
        data.append(row)
    return data, header
def format_for_json(packages, options):
    """Serialize ``packages`` to a JSON array of metadata dicts."""
    verbose = options.verbose >= 1

    def _as_dict(dist):
        entry = {
            'name': dist.project_name,
            'version': six.text_type(dist.version),
        }
        if verbose:
            entry['location'] = dist.location
            entry['installer'] = get_installer(dist)
        if options.outdated:
            entry['latest_version'] = six.text_type(dist.latest_version)
            entry['latest_filetype'] = dist.latest_filetype
        return entry

    return json.dumps([_as_dict(dist) for dist in packages])
| Karosuo/Linux_tools | xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_internal/commands/list.py | Python | gpl-3.0 | 10,150 |
# -*- coding: utf-8 -*-
"""
Code to manage fetching and storing the metadata of IdPs.
"""
import datetime
import logging
import dateutil.parser
import pytz
import requests
from celery.task import task
from lxml import etree
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from requests import exceptions
from third_party_auth.models import SAMLConfiguration, SAMLProviderConfig, SAMLProviderData
# Module-level logger shared by the metadata-fetch task and its helpers.
log = logging.getLogger(__name__)
SAML_XML_NS = 'urn:oasis:names:tc:SAML:2.0:metadata'  # The SAML Metadata XML namespace
class MetadataParseError(Exception):
    """ An error occurred while parsing the SAML metadata from an IdP.

    Raised by ``_parse_metadata_xml`` and handled in ``fetch_saml_metadata``
    alongside the network-level errors from ``requests``.
    """
    pass
@task(name='third_party_auth.fetch_saml_metadata')
def fetch_saml_metadata():
    """
    Fetch and store/update the metadata of all IdPs

    This task should be run on a daily basis.
    It's OK to run this whether or not SAML is enabled.

    Return value:
        tuple(num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages)
        num_total: Total number of providers found in the database
        num_skipped: Number of providers skipped because they are disabled,
            have automatic refresh turned off, or belong to a site where
            SAML is not enabled
        num_attempted: Number of distinct metadata URLs that were fetched
        num_updated: Number of providers that are either new or whose metadata has changed
        num_failed: Number of providers that could not be updated
        failure_messages: List of error messages for the providers that could not be updated
    """
    # First make a list of all the metadata XML URLs:
    saml_providers = SAMLProviderConfig.key_values('idp_slug', flat=True)
    num_total = len(saml_providers)
    num_skipped = 0
    url_map = {}
    for idp_slug in saml_providers:
        config = SAMLProviderConfig.current(idp_slug)
        # Skip SAML provider configurations which do not qualify for fetching
        if any([
            not config.enabled,
            not config.automatic_refresh_enabled,
            not SAMLConfiguration.is_enabled(config.site)
        ]):
            num_skipped += 1
            continue
        # Group entity IDs by metadata URL so each URL is fetched only once.
        url = config.metadata_source
        if url not in url_map:
            url_map[url] = []
        if config.entity_id not in url_map[url]:
            url_map[url].append(config.entity_id)
    # Now attempt to fetch the metadata for the remaining SAML providers:
    num_attempted = len(url_map)
    num_updated = 0
    failure_messages = []  # We return the length of this array for num_failed
    for url, entity_ids in url_map.items():
        try:
            log.info("Fetching %s", url)
            if not url.lower().startswith('https'):
                log.warning("This SAML metadata URL is not secure! It should use HTTPS. (%s)", url)
            response = requests.get(url, verify=True)  # May raise HTTPError or SSLError or ConnectionError
            response.raise_for_status()  # May raise an HTTPError
            # May raise etree.XMLSyntaxError, handled separately below.
            parser = etree.XMLParser(remove_comments=True)
            xml = etree.fromstring(response.content, parser)
            # TODO: Can use OneLogin_Saml2_Utils to validate signed XML if anyone is using that
            for entity_id in entity_ids:
                log.info(u"Processing IdP with entityID %s", entity_id)
                public_key, sso_url, expires_at = _parse_metadata_xml(xml, entity_id)
                changed = _update_data(entity_id, public_key, sso_url, expires_at)
                if changed:
                    log.info(u"→ Created new record for SAMLProviderData")
                    num_updated += 1
                else:
                    log.info(u"→ Updated existing SAMLProviderData. Nothing has changed.")
        except (exceptions.SSLError, exceptions.HTTPError, exceptions.RequestException, MetadataParseError) as error:
            # Catch and process exception in case of errors during fetching and processing saml metadata.
            # Here is a description of each exception.
            # SSLError is raised in case of errors caused by SSL (e.g. SSL cer verification failure etc.)
            # HTTPError is raised in case of unexpected status code (e.g. 500 error etc.)
            # RequestException is the base exception for any request related error that "requests" lib raises.
            # MetadataParseError is raised if there is error in the fetched meta data (e.g. missing @entityID etc.)
            # Use str(error) rather than error.message: the .message attribute
            # was removed from BaseException in Python 3.
            log.exception(str(error))
            failure_messages.append(
                "{error_type}: {error_message}\nMetadata Source: {url}\nEntity IDs: \n{entity_ids}.".format(
                    error_type=type(error).__name__,
                    error_message=str(error),
                    url=url,
                    entity_ids="\n".join(
                        ["\t{}: {}".format(count, item) for count, item in enumerate(entity_ids, start=1)],
                    )
                )
            )
        except etree.XMLSyntaxError as error:
            log.exception(str(error))
            failure_messages.append(
                "XMLSyntaxError: {error_message}\nMetadata Source: {url}\nEntity IDs: \n{entity_ids}.".format(
                    error_message=str(error.error_log),
                    url=url,
                    entity_ids="\n".join(
                        ["\t{}: {}".format(count, item) for count, item in enumerate(entity_ids, start=1)],
                    )
                )
            )
    # Return counts for total, skipped, attempted, updated, and failed, along with any failure messages
    return num_total, num_skipped, num_attempted, num_updated, len(failure_messages), failure_messages
def _parse_metadata_xml(xml, entity_id):
    """
    Given an XML document containing SAML 2.0 metadata, parse it and return a tuple of
    (public_key, sso_url, expires_at) for the specified entityID.

    Arguments:
        xml: an lxml element, either an <EntityDescriptor> or an <EntitiesDescriptor>
        entity_id: entityID string of the IdP whose data should be extracted

    Raises MetadataParseError if anything is wrong.
    """
    if xml.tag == etree.QName(SAML_XML_NS, 'EntityDescriptor'):
        entity_desc = xml
    else:
        if xml.tag != etree.QName(SAML_XML_NS, 'EntitiesDescriptor'):
            raise MetadataParseError("Expected root element to be <EntitiesDescriptor>, not {}".format(xml.tag))
        entity_desc = xml.find(
            ".//{}[@entityID='{}']".format(etree.QName(SAML_XML_NS, 'EntityDescriptor'), entity_id)
        )
        # Compare with None explicitly: lxml elements without children are
        # falsy, so "if not entity_desc" would misreport a present-but-empty
        # EntityDescriptor as missing.
        if entity_desc is None:
            raise MetadataParseError("Can't find EntityDescriptor for entityID {}".format(entity_id))
    expires_at = None
    if "validUntil" in xml.attrib:
        # NOTE(review): a validUntil value without a UTC offset parses to a
        # naive datetime, which cannot be compared with the tz-aware
        # cacheDuration value below on Python 3 — confirm the metadata always
        # carries an offset.
        expires_at = dateutil.parser.parse(xml.attrib["validUntil"])
    if "cacheDuration" in xml.attrib:
        cache_expires = OneLogin_Saml2_Utils.parse_duration(xml.attrib["cacheDuration"])
        cache_expires = datetime.datetime.fromtimestamp(cache_expires, tz=pytz.utc)
        if expires_at is None or cache_expires < expires_at:
            expires_at = cache_expires
    sso_desc = entity_desc.find(etree.QName(SAML_XML_NS, "IDPSSODescriptor"))
    # Same lxml pitfall as above: test for None, not truthiness.
    if sso_desc is None:
        raise MetadataParseError("IDPSSODescriptor missing")
    if 'urn:oasis:names:tc:SAML:2.0:protocol' not in sso_desc.get("protocolSupportEnumeration"):
        raise MetadataParseError("This IdP does not support SAML 2.0")
    # Now we just need to get the public_key and sso_url
    public_key = sso_desc.findtext("./{}//{}".format(
        etree.QName(SAML_XML_NS, "KeyDescriptor"), "{http://www.w3.org/2000/09/xmldsig#}X509Certificate"
    ))
    if not public_key:
        raise MetadataParseError("Public Key missing. Expected an <X509Certificate>")
    public_key = public_key.replace(" ", "")
    binding_elements = sso_desc.iterfind("./{}".format(etree.QName(SAML_XML_NS, "SingleSignOnService")))
    sso_bindings = {element.get('Binding'): element.get('Location') for element in binding_elements}
    try:
        # The only binding supported by python-saml and python-social-auth is HTTP-Redirect:
        sso_url = sso_bindings['urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect']
    except KeyError:
        raise MetadataParseError("Unable to find SSO URL with HTTP-Redirect binding.")
    return public_key, sso_url, expires_at
def _update_data(entity_id, public_key, sso_url, expires_at):
    """
    Update/Create the SAMLProviderData for the given entity ID.

    Return value:
        False if nothing has changed and existing data's "fetched at" timestamp is just updated.
        True if a new record was created. (Either this is a new provider or something changed.)
    """
    existing = SAMLProviderData.current(entity_id)
    fetched_at = datetime.datetime.now()
    unchanged = (
        existing and
        existing.public_key == public_key and
        existing.sso_url == sso_url
    )
    if unchanged:
        # Same key and URL: just refresh the timestamps on the current record.
        existing.expires_at = expires_at
        existing.fetched_at = fetched_at
        existing.save()
        return False
    SAMLProviderData.objects.create(
        entity_id=entity_id,
        fetched_at=fetched_at,
        expires_at=expires_at,
        sso_url=sso_url,
        public_key=public_key,
    )
    return True
| angelapper/edx-platform | common/djangoapps/third_party_auth/tasks.py | Python | agpl-3.0 | 9,113 |
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, repository = None ):
    "Home a gcode linear move file or text."
    gcodeText = archive.getTextIfEmpty(fileName, text)
    return getCraftedTextFromText(gcodeText, repository)
def getCraftedTextFromText( gcodeText, repository = None ):
    "Home a gcode linear move text."
    # Nothing to do if the text is empty or has already been homed.
    if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
        return gcodeText
    # Use `is None` (identity), not `== None`, per Python idiom.
    if repository is None:
        repository = settings.getReadRepository( HomeRepository() )
    if not repository.activateHome.value:
        return gcodeText
    return HomeSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
    'Get a new HomeRepository settings instance.'
    return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
    "Home a gcode linear move file. Chain home the gcode if it is not already homed."
    # Delegates to the craft chain so all prerequisite tools run first.
    skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
    "A class to handle the home settings."
    def __init__(self):
        "Set the default settings, execute title & settings fileName."
        skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
        # File chooser used when the Home dialog is opened directly.
        self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
        self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
        # Master switch: when off, getCraftedTextFromText returns the text unchanged.
        self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
        # Name of the alteration file whose commands are inserted at each layer start.
        self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
        self.executeTitle = 'Home'
    def execute(self):
        "Home button has been clicked."
        fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
        for fileName in fileNames:
            writeOutput(fileName)
class HomeSkein:
    "A class to home a skein of extrusions."
    def __init__(self):
        "Initialize parsing state before any gcode has been seen."
        self.distanceFeedRate = gcodec.DistanceFeedRate()  # output accumulator
        self.extruderActive = False  # True between M101 (on) and M103 (off)
        self.highestZ = None  # highest z seen so far; hops travel at this height
        self.homeLines = []  # lines of the home alteration file, if any
        self.layerCount = settings.LayerCount()
        self.lineIndex = 0
        self.lines = None
        self.oldLocation = None  # last G1 location parsed
        self.shouldHome = False  # set at each layer start when home lines exist
        self.travelFeedRateMinute = 957.0  # default; overridden in parseInitialization
    def addFloat( self, begin, end ):
        "Add dive to the original height."
        # Move to a point one edge width short of `end`, at hop height, so the
        # head floats down toward the resume point instead of dropping on it.
        # NOTE(review): assumes self.absoluteEdgeWidth was set by
        # parseInitialization from an (<edgeWidth> tag — confirm it always
        # precedes the first G1 in crafted gcode.
        beginEndDistance = begin.distance(end)
        alongWay = self.absoluteEdgeWidth / beginEndDistance
        closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
        closeToEnd.z = self.highestZ
        self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
    def addHomeTravel( self, splitLine ):
        "Add the home travel gcode."
        location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
        self.highestZ = max( self.highestZ, location.z )
        if not self.shouldHome:
            return
        self.shouldHome = False
        if self.oldLocation == None:
            return
        # Turn the extruder off while traveling to home, restore afterwards.
        if self.extruderActive:
            self.distanceFeedRate.addLine('M103')
        self.addHopUp( self.oldLocation )
        self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
        self.addHopUp( self.oldLocation )
        self.addFloat( self.oldLocation, location )
        if self.extruderActive:
            self.distanceFeedRate.addLine('M101')
    def addHopUp(self, location):
        "Add hop to highest point."
        locationUp = Vector3( location.x, location.y, self.highestZ )
        self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
    def getCraftedGcode( self, gcodeText, repository ):
        "Parse gcode text and store the home gcode."
        self.repository = repository
        self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
        # Without a home alteration file there is nothing to insert.
        if len(self.homeLines) < 1:
            return gcodeText
        self.lines = archive.getTextLines(gcodeText)
        self.parseInitialization( repository )
        for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
            line = self.lines[self.lineIndex]
            self.parseLine(line)
        return self.distanceFeedRate.output.getvalue()
    def parseInitialization( self, repository ):
        'Parse gcode initialization and store the parameters.'
        for self.lineIndex in xrange(len(self.lines)):
            line = self.lines[self.lineIndex]
            splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
            firstWord = gcodec.getFirstWord(splitLine)
            self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
            if firstWord == '(</extruderInitialization>)':
                # End of the header; record that home has processed this file.
                self.distanceFeedRate.addTagBracketedProcedure('home')
                return
            elif firstWord == '(<edgeWidth>':
                self.absoluteEdgeWidth = abs(float(splitLine[1]))
            elif firstWord == '(<travelFeedRatePerSecond>':
                self.travelFeedRateMinute = 60.0 * float(splitLine[1])
            self.distanceFeedRate.addLine(line)
    def parseLine(self, line):
        "Parse a gcode line and add it to the bevel gcode."
        splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
        if len(splitLine) < 1:
            return
        firstWord = splitLine[0]
        if firstWord == 'G1':
            self.addHomeTravel(splitLine)
            self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
        elif firstWord == '(<layer>':
            self.layerCount.printProgressIncrement('home')
            # Request a home sequence before the next travel on this layer.
            if len(self.homeLines) > 0:
                self.shouldHome = True
        elif firstWord == 'M101':
            self.extruderActive = True
        elif firstWord == 'M103':
            self.extruderActive = False
        self.distanceFeedRate.addLine(line)
def main():
    "Display the home dialog."
    arguments = sys.argv[1 :]
    if arguments:
        writeOutput(' '.join(arguments))
    else:
        settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| nophead/Skeinforge50plus | skeinforge_application/skeinforge_plugins/craft_plugins/home.py | Python | agpl-3.0 | 8,025 |
import bleach
import json
from django import forms
from osf.models import CollectionProvider, CollectionSubmission
from admin.base.utils import get_nodelicense_choices, get_defaultlicense_choices, validate_slug
class CollectionProviderForm(forms.ModelForm):
    """Admin form for creating/updating a ``CollectionProvider``.

    The ``*_choices`` fields arrive as JSON-encoded lists of strings. Each
    ``clean_*_choices`` method diffs the submitted values against the
    provider's primary collection and refuses to drop a choice that is still
    used as metadata on a submission. The five clean methods share one
    helper, ``_clean_choices``, to avoid five copies of the same logic.
    """
    collected_type_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
    status_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
    volume_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
    issue_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
    program_area_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
    _id = forms.SlugField(
        required=True,
        help_text='URL Slug',
        validators=[validate_slug]
    )

    class Meta:
        model = CollectionProvider
        exclude = ['primary_identifier_name', 'primary_collection', 'type', 'allow_commenting', 'advisory_board',
                   'example', 'domain', 'domain_redirect_enabled', 'reviews_comments_anonymous',
                   'reviews_comments_private', 'reviews_workflow']
        widgets = {
            'licenses_acceptable': forms.CheckboxSelectMultiple(),
        }

    def __init__(self, *args, **kwargs):
        # Evaluate the license choices at form-instantiation time (not at
        # class-definition time) so database changes are picked up.
        nodelicense_choices = get_nodelicense_choices()
        defaultlicense_choices = get_defaultlicense_choices()
        super(CollectionProviderForm, self).__init__(*args, **kwargs)
        self.fields['licenses_acceptable'].choices = nodelicense_choices
        self.fields['default_license'].choices = defaultlicense_choices

    def _clean_html_field(self, field_name, tags):
        """Sanitize the submitted HTML for ``field_name``, allowing only ``tags``."""
        value = self.data.get(field_name)
        if not value:
            return u''
        return bleach.clean(
            value,
            tags=tags,
            attributes=['class', 'style', 'href', 'title', 'target'],
            styles=['text-align', 'vertical-align'],
            strip=True
        )

    def clean_description(self, *args, **kwargs):
        return self._clean_html_field(
            'description', ['a', 'br', 'em', 'p', 'span', 'strong'])

    def clean_footer_links(self, *args, **kwargs):
        # Footer links additionally allow <div> wrappers.
        return self._clean_html_field(
            'footer_links', ['a', 'br', 'div', 'em', 'p', 'span', 'strong'])

    def _clean_choices(self, field_name, filter_field):
        """Diff submitted JSON choices for ``field_name`` against the primary
        collection and return ``{'added': ..., 'removed': ...}``.

        ``filter_field`` is the CollectionSubmission field used to detect that
        a removed choice is still referenced as metadata, in which case a
        ValidationError is raised.
        """
        collection_provider = self.instance
        # if this is to modify an existing CollectionProvider
        if collection_provider.primary_collection:
            old_choices = {c.strip(' ') for c in getattr(collection_provider.primary_collection, field_name)}
            new_choices = {c.strip(' ') for c in json.loads(self.data.get(field_name))}
            added = new_choices - old_choices
            removed = old_choices - new_choices
            for item in removed:
                item_filter = {
                    'collection': collection_provider.primary_collection,
                    filter_field: item,
                }
                if CollectionSubmission.objects.filter(**item_filter).exists():
                    raise forms.ValidationError(
                        'Cannot delete "{}" because it is used as metadata on objects.'.format(item)
                    )
        else:
            # if this is creating a CollectionProvider
            added = []
            removed = []
            choices = self.data.get(field_name)
            if choices:
                added = json.loads(choices)
        return {
            'added': added,
            'removed': removed,
        }

    def clean_collected_type_choices(self):
        return self._clean_choices('collected_type_choices', 'collected_type')

    def clean_status_choices(self):
        return self._clean_choices('status_choices', 'status')

    def clean_volume_choices(self):
        return self._clean_choices('volume_choices', 'volume')

    def clean_issue_choices(self):
        return self._clean_choices('issue_choices', 'issue')

    def clean_program_area_choices(self):
        return self._clean_choices('program_area_choices', 'program_area')
| baylee-d/osf.io | admin/collection_providers/forms.py | Python | apache-2.0 | 9,406 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Constant Value Tensors
TensorFlow provides several operations that you can use to generate constants.
@@zeros
@@zeros_like
@@ones
@@ones_like
@@fill
@@constant
## Sequences
@@linspace
@@range
## Random Tensors
TensorFlow has several ops that create random tensors with different
distributions. The random ops are stateful, and create new random values each
time they are evaluated.
The `seed` keyword argument in these functions acts in conjunction with
the graph-level random seed. Changing either the graph-level seed using
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed) or the
op-level seed will change the underlying seed of these operations. Setting
neither graph-level nor op-level seed, results in a random seed for all
operations.
See [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for details on the interaction between operation-level and graph-level random
seeds.
### Examples:
```python
# Create a tensor of shape [2, 3] consisting of random normal values, with mean
# -1 and standard deviation 4.
norm = tf.random_normal([2, 3], mean=-1, stddev=4)
# Shuffle the first dimension of a tensor
c = tf.constant([[1, 2], [3, 4], [5, 6]])
shuff = tf.random_shuffle(c)
# Each time we run these ops, different results are generated
sess = tf.Session()
print(sess.run(norm))
print(sess.run(norm))
# Set an op-level seed to generate repeatable sequences across sessions.
c = tf.constant([[1, 2], [3, 4], [5, 6]])
sess = tf.Session()
norm = tf.random_normal(c, seed=1234)
print(sess.run(norm))
print(sess.run(norm))
```
Another common use of random values is the initialization of variables. Also see
the [Variables How To](../../how_tos/variables/index.md).
```python
# Use random uniform values in [0, 1) as the initializer for a variable of shape
# [2, 3]. The default type is float32.
var = tf.Variable(tf.random_uniform([2, 3]), name="var")
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print(sess.run(var))
```
@@random_normal
@@truncated_normal
@@random_uniform
@@random_shuffle
@@set_random_seed
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def constant(value, dtype=None, shape=None, name="Const"):
  """Creates a constant tensor.

  The resulting tensor is populated with values of type `dtype`, as
  specified by arguments `value` and (optionally) `shape` (see examples
  below).

  The argument `value` can be a constant value, or a list of values of type
  `dtype`. If `value` is a list, then the length of the list must be less
  than or equal to the number of elements implied by the `shape` argument (if
  specified). In the case where the list length is less than the number of
  elements specified by `shape`, the last element in the list will be used
  to fill the remaining entries.

  The argument `shape` is optional. If present, it specifies the dimensions
  of the resulting tensor. If not present, then the tensor is a scalar (0-D)
  if `value` is a scalar, or 1-D otherwise.

  If the argument `dtype` is not specified, then the type is inferred from
  the type of `value`.

  For example:

  ```python
  # Constant 1-D Tensor populated with value list.
  tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]

  # Constant 2-D tensor populated with scalar value -1.
  tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
                                               [-1. -1. -1.]]
  ```

  Args:
    value:     A constant value (or list) of output type `dtype`.

    dtype:     The type of the elements of the resulting tensor.

    shape:     Optional dimensions of resulting tensor.

    name:      Optional name for the tensor.

  Returns:
    A Constant Tensor.
  """
  g = ops.get_default_graph()
  # Serialize `value` into a TensorProto attr; value/shape/dtype validation
  # happens inside make_tensor_proto.
  tensor_value = attr_value_pb2.AttrValue()
  tensor_value.tensor.CopyFrom(
      tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
  dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
  # A Const op has no inputs; its single output is the constant tensor.
  const_tensor = g.create_op(
      "Const", [], [dtype_value.type],
      attrs={"value": tensor_value, "dtype": dtype_value}, name=name).outputs[0]
  return const_tensor
@ops.RegisterShape("Const")
def _ConstantShape(op):
  # The output shape of a Const op is fully determined by the dims recorded
  # in its "value" attr's TensorProto.
  return [tensor_shape.TensorShape(
      [d.size for d in op.get_attr("value").tensor_shape.dim])]
def _constant_tensor_conversion_function(v, dtype=None, name=None,
                                         as_ref=False):
  """Tensor-conversion hook that wraps `constant()`.

  `as_ref` is part of the conversion-function signature but is meaningless
  for constants, so it is discarded.
  """
  del as_ref  # constants can never be fetched by reference
  return constant(v, dtype=dtype, name=name)
# Register `constant()` as the implicit conversion for plain Python
# containers and NumPy values. Priority 100 for the specific types; the
# catch-all `object` registration uses a lower priority (200, i.e. tried
# later) so more specific converters win first.
ops.register_tensor_conversion_function(
    (list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    np.ndarray, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    np.generic, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
                                             as_ref=False):
  """Convert a fully-defined TensorShape into a 1-D integer constant Tensor.

  Raises ValueError for partially-known shapes and TypeError for any
  requested dtype other than int32/int64 (default: int32).
  """
  del as_ref  # constants can never be fetched by reference
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  if dtype is None:
    dtype = dtypes.int32
  elif dtype not in (dtypes.int32, dtypes.int64):
    raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
  if name is None:
    name = "shape_as_tensor"
  return constant(s.as_list(), dtype=dtype, name=name)
ops.register_tensor_conversion_function(
    tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
def _dimension_tensor_conversion_function(d, dtype=None, name=None,
                                          as_ref=False):
  """Convert a known `Dimension` into a scalar integer constant Tensor.

  Args:
    d: A `tensor_shape.Dimension` whose `value` must be known.
    dtype: Optional element type; must be `int32` or `int64` if given
      (default: `int32`).
    name: Optional name for the resulting constant op (default:
      "shape_as_tensor", matching the TensorShape converter).
    as_ref: Ignored; constants cannot be fetched by reference.

  Returns:
    A scalar constant `Tensor` holding `d.value`.

  Raises:
    ValueError: If `d` has no known value.
    TypeError: If `dtype` is neither `int32` nor `int64`.
  """
  _ = as_ref
  if d.value is None:
    raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      # Bug fix: the message previously said "TensorShape" (copy/paste from
      # the TensorShape converter) although this function converts a
      # Dimension.
      raise TypeError("Cannot convert a Dimension to dtype: %s" % dtype)
  else:
    dtype = dtypes.int32
  if name is None:
    name = "shape_as_tensor"
  return constant(d.value, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
    tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
| DeepThoughtTeam/tensorflow | tensorflow/python/ops/constant_op.py | Python | apache-2.0 | 7,338 |
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
    """Unit tests for QosSRIOVAgentDriver bandwidth-limit handling.

    The eswitch manager is replaced with a mock so each test only asserts
    the max-rate call the driver makes for a fake port/policy pair.
    """

    # Identity of the fake port used in every test.
    ASSIGNED_MAC = '00:00:00:00:00:66'
    PCI_SLOT = '0000:06:00.1'
    def setUp(self):
        """Build a driver with a mocked eswitch manager and fake fixtures."""
        super(QosSRIOVAgentDriverTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.qos_driver = qos_driver.QosSRIOVAgentDriver()
        self.qos_driver.initialize()
        self.qos_driver.eswitch_mgr = mock.Mock()
        self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
        # Shorthand for the mock the assertions below inspect.
        self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
        self.rule = self._create_bw_limit_rule_obj()
        self.qos_policy = self._create_qos_policy_obj([self.rule])
        self.port = self._create_fake_port()
    def _create_bw_limit_rule_obj(self):
        """Return a bandwidth-limit rule object with fixed kbps values."""
        rule_obj = rule.QosBandwidthLimitRule()
        rule_obj.id = uuidutils.generate_uuid()
        rule_obj.max_kbps = 2
        rule_obj.max_burst_kbps = 200
        rule_obj.obj_reset_changes()
        return rule_obj
    def _create_qos_policy_obj(self, rules):
        """Return a non-shared QoS policy wrapping the given rules."""
        policy_dict = {'id': uuidutils.generate_uuid(),
                'tenant_id': uuidutils.generate_uuid(),
                'name': 'test',
                'description': 'test',
                'shared': False,
                'rules': rules}
        policy_obj = policy.QosPolicy(self.context, **policy_dict)
        policy_obj.obj_reset_changes()
        return policy_obj
    def _create_fake_port(self):
        """Return a port dict with the class-level MAC and PCI slot."""
        return {'port_id': uuidutils.generate_uuid(),
                'profile': {'pci_slot': self.PCI_SLOT},
                'device': self.ASSIGNED_MAC}
    def test_create_rule(self):
        """create() pushes the rule's max_kbps to the eswitch manager."""
        self.qos_driver.create(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
    def test_update_rule(self):
        """update() pushes the rule's max_kbps to the eswitch manager."""
        self.qos_driver.update(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
    def test_delete_rules(self):
        """delete() resets the device max rate to 0."""
        self.qos_driver.delete(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, 0)
    def test__set_vf_max_rate_captures_sriov_failure(self):
        """An SriovNicError from the eswitch manager must not propagate."""
        self.max_rate_mock.side_effect = exceptions.SriovNicError()
        self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
    def test__set_vf_max_rate_unknown_device(self):
        """No rate call is made when the device does not exist."""
        with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
                               return_value=False):
            self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
        self.assertFalse(self.max_rate_mock.called)
| mmnelemane/neutron | neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py | Python | apache-2.0 | 3,672 |
"""Support for ADS sensors."""
import logging
import voluptuous as vol
from homeassistant.components import ads
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from . import CONF_ADS_FACTOR, CONF_ADS_TYPE, CONF_ADS_VAR, STATE_KEY_STATE, AdsEntity
_LOGGER = logging.getLogger(__name__)

# Entity name used when the config omits CONF_NAME.
DEFAULT_NAME = "ADS sensor"

# Platform config schema: an ADS variable name is required; the PLC data
# type defaults to INT and must be one of the listed integer types. The
# optional integer factor is forwarded unchanged to the entity.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADS_VAR): cv.string,
        vol.Optional(CONF_ADS_FACTOR): cv.positive_int,
        vol.Optional(CONF_ADS_TYPE, default=ads.ADSTYPE_INT): vol.In(
            [
                ads.ADSTYPE_INT,
                ads.ADSTYPE_UINT,
                ads.ADSTYPE_BYTE,
                ads.ADSTYPE_DINT,
                ads.ADSTYPE_UDINT,
            ]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=""): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up an ADS sensor device."""
    ads_hub = hass.data.get(ads.DATA_ADS)

    # Build the entity straight from the validated platform config.
    sensor = AdsSensor(
        ads_hub,
        config[CONF_ADS_VAR],
        config[CONF_ADS_TYPE],
        config[CONF_NAME],
        config.get(CONF_UNIT_OF_MEASUREMENT),
        config.get(CONF_ADS_FACTOR),
    )
    add_entities([sensor])
class AdsSensor(AdsEntity):
    """Representation of an ADS sensor entity."""

    def __init__(self, ads_hub, ads_var, ads_type, name, unit_of_measurement, factor):
        """Initialize AdsSensor entity."""
        super().__init__(ads_hub, name, ads_var)
        self._ads_type = ads_type
        self._factor = factor
        self._unit_of_measurement = unit_of_measurement

    async def async_added_to_hass(self):
        """Register device notification."""
        plc_datatype = self._ads_hub.ADS_TYPEMAP[self._ads_type]
        await self.async_initialize_device(
            self._ads_var, plc_datatype, STATE_KEY_STATE, self._factor
        )

    @property
    def state(self):
        """Return the state of the device."""
        return self._state_dict[STATE_KEY_STATE]

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
| nkgilley/home-assistant | homeassistant/components/ads/sensor.py | Python | apache-2.0 | 2,424 |
from __future__ import absolute_import, print_function, unicode_literals
import platform
import sys
import warnings
# Mopidy only runs on CPython 2.7: bail out early with a clear message on
# anything older or on Python 3.
if sys.version_info < (2, 7) or sys.version_info >= (3,):
    sys.exit(
        'ERROR: Mopidy requires Python 2.7, but found %s.' %
        platform.python_version())


# GStreamer emits this harmless warning on headless systems; silence it.
warnings.filterwarnings('ignore', 'could not open display')


__version__ = '1.1.1'
| pacificIT/mopidy | mopidy/__init__.py | Python | apache-2.0 | 357 |
#!/usr/bin/env python
# This script updates the allowed address pairs in Neutron with the
# 'neutron port-update' command. This is required by Calico in OpenStack,
# otherwise BGP will not be working. We query OpenStack API directly to prevent
# installing any dependencies such as python-neutronclient.
#
# USAGE: script_name arg1 arg2...argN
# arg1 - Calico network, i.e. 192.168.0.0/24
# arg2...argN - VMs MAC addresses
#
# Script exit codes (for Ansible)
# 0 - no update to port [default]
# 1 - error
# 2 - port has been updated
import json
import os
import requests
import sys
def credentials():
    """Read Keystone credentials from the OS_* environment variables.

    Exits with status 1 if any of the four variables is missing or empty.
    """
    creds = {
        'username': os.environ.get('OS_USERNAME'),
        'password': os.environ.get('OS_PASSWORD'),
        'tenant_name': os.environ.get('OS_TENANT_NAME'),
        'auth_url': os.environ.get('OS_AUTH_URL'),
    }
    if not all(creds.values()):
        sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
        exit(1)
    return creds
def get_catalog():
    """Authenticate against Keystone and return the full service catalog.

    Exits with status 1 when authentication fails (empty response or an
    'error' key in the reply).
    """
    creds = credentials()
    payload = {
        "auth":
        {
            "tenantName": creds['tenant_name'],
            "passwordCredentials": {
                "username": creds['username'],
                "password": creds['password']
            }
        }
    }
    response = requests.post(creds['auth_url'] + "/tokens",
                             headers={'Content-Type': 'application/json'},
                             data=json.dumps(payload))
    catalog = json.loads(response.text)
    if not catalog or 'error' in catalog:
        sys.stderr.write("ERROR: Unable to get authentication token\n")
        exit(1)
    return catalog
def get_token(catalog):
    """Extract the Keystone authentication token id from a catalog."""
    access = catalog['access']
    return access['token']['id']
def neutron_public_url(catalog):
    """Return the first publicURL of the 'network' service, or None."""
    for service in catalog['access']['serviceCatalog']:
        if service['type'] != 'network':
            continue
        for endpoint in service['endpoints']:
            return endpoint['publicURL']
def list_ports(token, public_url):
    """Return the list of Neutron ports, exiting with 1 on an empty reply."""
    response = requests.get(public_url + "v2.0/ports",
                            headers={'X-Auth-Token': token})
    if not response.text:
        sys.stderr.write("ERROR: Unable to retrieve Neutron ports list\n")
        exit(1)
    return json.loads(response.text)['ports']
def update_port(token, public_url, port_id, mac_address, calico_network):
    """Update a Neutron port's allowed address pairs with the Calico network.

    Args:
        token: Keystone auth token.
        public_url: Neutron public endpoint (trailing slash expected).
        port_id: UUID of the port to update.
        mac_address: MAC address to pair with the Calico network.
        calico_network: CIDR to allow on the port.

    Returns:
        The HTTP status code (200) on success; exits with status 1 on error.
    """
    headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
    payload = {
        "port": {
            "allowed_address_pairs": [
                {
                    "ip_address": calico_network,
                    "mac_address": mac_address
                }
            ]
        }
    }
    auth_url = public_url + "v2.0/ports/" + port_id
    r = requests.put(auth_url, headers=headers, data=json.dumps(payload))
    parsed_json = json.loads(r.text)
    if r.status_code != 200 or 'NeutronError' in parsed_json:
        # Bug fix: a non-200 reply without a 'NeutronError' key previously
        # raised KeyError here, hiding the real failure. Fall back to the
        # whole response body when the key is absent.
        error = parsed_json.get('NeutronError', parsed_json)
        sys.stderr.write("ERROR: Unable to update port: %s\n" % error)
        exit(1)
    else:
        return r.status_code
if __name__ == "__main__":
    # Usage: script <calico_network_cidr> <vm_mac_1> [<vm_mac_2> ...]
    if len(sys.argv) < 3:
        sys.stderr.write("ERROR: Please run script with the correct arguments\n")
        exit(1)
    calico_network = sys.argv[1]
    vms_mac_addresses = sys.argv[2:]
    catalog = get_catalog()
    token = get_token(catalog)
    public_url = neutron_public_url(catalog)
    ports = list_ports(token, public_url)
    # Exit code protocol actually implemented here: 0 when nothing was
    # changed (the default), 2 when at least one port was updated; the
    # helper functions exit(1) themselves on error.
    exit_code = 0 # no update to port
    for port in ports:
        port_id = port['id']
        mac_address = port['mac_address']
        # Only touch ports that belong to one of our VMs and that do not
        # already carry allowed address pairs (i.e. not yet configured).
        if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:
            status_code = update_port(token, public_url, port_id, mac_address, calico_network)
            if status_code == 200:
                exit_code = 2 # port has been updated
    exit(exit_code)
| KaGeN101/mantl | roles/calico/files/neutron_port_update.py | Python | apache-2.0 | 4,467 |
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtktud
class imageGradientStructureTensor(ModuleBase, NoConfigModuleMixin):
    """DeVIDE module wrapping vtktud.vtkImageGradientStructureTensor.

    Has no configuration of its own (NoConfigModuleMixin); it simply
    exposes the VTK filter's three image inputs and single image output.
    """
    def __init__(self, module_manager):
        """Create the VTK filter and the (config-less) view frame."""
        # initialise our base class
        ModuleBase.__init__(self, module_manager)
        NoConfigModuleMixin.__init__(self)
        self._imageGradientStructureTensor = vtktud.vtkImageGradientStructureTensor()
#        module_utils.setup_vtk_object_progress(self, self._clipPolyData,
#                                           'Calculating normals')
        self._viewFrame = self._createViewFrame(
            {'ImageGradientStructureTensor' : self._imageGradientStructureTensor})
        # pass the data down to the underlying logic
        self.config_to_logic()
        # and all the way up from logic -> config -> view to make sure
        self.syncViewWithLogic()
    def close(self):
        """Disconnect inputs, tear down the view, and drop the VTK filter."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        NoConfigModuleMixin.close(self)
        # get rid of our reference
        del self._imageGradientStructureTensor
    def get_input_descriptions(self):
        """Three vtkImageData inputs, forwarded to the VTK filter."""
        return ('vtkImageData', 'vtkImageData', 'vtkImageData')
    def set_input(self, idx, inputStream):
        """Connect (or disconnect with None) input `idx` of the filter."""
        self._imageGradientStructureTensor.SetInput(idx, inputStream)
    def get_output_descriptions(self):
        """Single vtkImageData output."""
        return ('vtkImageData',)
    def get_output(self, idx):
        # NOTE(review): idx is ignored; the filter has only one output.
        return self._imageGradientStructureTensor.GetOutput()
    def logic_to_config(self):
        """No configuration to sync (NoConfigModuleMixin)."""
        pass
    def config_to_logic(self):
        """No configuration to sync (NoConfigModuleMixin)."""
        pass
    def view_to_config(self):
        """No configuration to sync (NoConfigModuleMixin)."""
        pass
    def config_to_view(self):
        """No configuration to sync (NoConfigModuleMixin)."""
        pass
    def execute_module(self):
        """Run the VTK pipeline update for this filter."""
        self._imageGradientStructureTensor.Update()
    def view(self, parent_window=None):
        """Show (or raise) the module's view frame."""
        # if the window was visible already. just raise it
        if not self._viewFrame.Show(True):
            self._viewFrame.Raise()
| nagyistoce/devide | modules/user/imageGradientStructureTensor.py | Python | bsd-3-clause | 2,195 |
"""
Test basic std::weak_ptr functionality.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSharedPtr(TestBase):
    """Test std::weak_ptr through the imported libc++ std module.

    NOTE(review): the file docstring says std::weak_ptr but the class is
    named TestSharedPtr -- presumably copied from the shared_ptr test.
    Renaming would change test identification, so it is only flagged here.
    """

    mydir = TestBase.compute_mydir(__file__)

    @add_test_categories(["libc++"])
    @skipIf(compiler=no_match("clang"))
    def test(self):
        """Lock, dereference, mutate, and reset a weak_ptr via expressions."""
        self.build()

        lldbutil.run_to_source_breakpoint(self,
                                          "// Set break point at this line.", lldb.SBFileSpec("main.cpp"))

        self.runCmd("settings set target.import-std-module true")

        self.expect("expr (int)*w.lock()", substrs=['(int) $0 = 3'])
        self.expect("expr (int)(*w.lock() = 5)", substrs=['(int) $1 = 5'])
        self.expect("expr (int)*w.lock()", substrs=['(int) $2 = 5'])
        self.expect("expr w.use_count()", substrs=['(long) $3 = 1'])
        self.expect("expr w.reset()")
        self.expect("expr w.use_count()", substrs=['(long) $4 = 0'])
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/commands/expression/import-std-module/weak_ptr/TestWeakPtrFromStdModule.py | Python | bsd-3-clause | 944 |
from unittest import TestCase
from decimal import Decimal
import datetime
import sys
# Portable non-ASCII test string: Python 3 str literals are already
# unicode; Python 2 needs an explicit unicode() value.
if sys.version_info[0] == 3:
    unicode_str = '\u2603'
else:
    unicode_str = unicode('snowman')
import validictory
class TestType(TestCase):
    """Validate the "type" keyword against every supported schema type."""

    def test_schema(self):
        """A list of schemas in "type" acts as a union of alternatives."""
        schema = {
            "type": [
                {"type": "array", "minItems": 10},
                {"type": "string", "pattern": "^0+$"}
            ]
        }
        data1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        data2 = "0"
        data3 = 1203

        for x in [data1, data2]:
            try:
                validictory.validate(x, schema)
            except ValueError as e:
                self.fail("Unexpected failure: %s" % e)
        self.assertRaises(ValueError, validictory.validate, data3, schema)

    def _test_type(self, typename, valids, invalids):
        """Assert every valid value passes and every invalid one raises."""
        for x in valids:
            try:
                validictory.validate(x, {"type": typename})
            except ValueError as e:
                self.fail("Unexpected failure: %s" % e)

        for x in invalids:
            self.assertRaises(ValueError, validictory.validate, x,
                              {"type": typename})

    def test_integer(self):
        """ints pass; floats, bools and non-numbers fail."""
        valid_ints = [1, -89, 420000]
        invalid_ints = [1.2, "bad", {"test": "blah"}, [32, 49], None, True]
        self._test_type('integer', valid_ints, invalid_ints)

    def test_string(self):
        """str (incl. unicode) passes; numbers and containers fail."""
        valids = ["abc", unicode_str]
        invalids = [1.2, 1, {"test": "blah"}, [32, 49], None, True]
        self._test_type('string', valids, invalids)

    def test_number(self):
        """ints, floats and Decimals pass; bools and non-numbers fail."""
        valids = [1.2, -89.42, 48, -32, Decimal('25.25')]
        invalids = ["bad", {"test": "blah"}, [32.42, 494242], None, True]
        self._test_type('number', valids, invalids)

    def test_boolean(self):
        """Only True/False pass; truthy/falsy stand-ins fail."""
        valids = [True, False]
        invalids = [1.2, "False", {"test": "blah"}, [32, 49], None, 1, 0]
        self._test_type('boolean', valids, invalids)

    def test_object(self):
        """dicts (including nested) pass; everything else fails."""
        valids = [{"blah": "test"}, {"this": {"blah": "test"}}, {1: 2, 10: 20}]
        invalids = [1.2, "bad", 123, [32, 49], None, True]
        self._test_type('object', valids, invalids)

    def test_array(self):
        """lists and tuples pass; everything else fails."""
        valids = [[1, 89], [48, {"test": "blah"}, "49", 42], (47, 11)]
        invalids = [1.2, "bad", {"test": "blah"}, 1234, None, True]
        self._test_type('array', valids, invalids)

    def test_null(self):
        """Only None passes."""
        valids = [None]
        invalids = [1.2, "bad", {"test": "blah"}, [32, 49], 1284, True]
        self._test_type('null', valids, invalids)

    def test_any(self):
        """"any" accepts every value."""
        valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True]
        self._test_type('any', valids, [])

    def test_default(self):
        """An empty schema accepts every value (same as "any")."""
        # test default value (same as any really)
        valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True]
        for x in valids:
            try:
                validictory.validate(x, {})
            except ValueError as e:
                self.fail("Unexpected failure: %s" % e)

    def test_multi(self):
        """A list or tuple of type names acts as a union."""
        types = ["null", "integer", "string"]
        valids = [None, 42, "string"]
        invalids = [1.2, {"test": "blah"}, [32, 49], True]
        self._test_type(types, valids, invalids)
        self._test_type(tuple(types), valids, invalids)
class TestDisallow(TestType):
    """Reuse TestType's cases against "disallow" schemas (inverted logic)."""

    def _test_type(self, typename, valids, invalids):
        """Values invalid for `typename` must validate; valid ones must not."""
        # Invalid-for-type values are exactly the ones "disallow" accepts.
        for value in invalids:
            try:
                validictory.validate(value, {"disallow": typename})
            except ValueError as e:
                self.fail("Unexpected failure: %s" % e)

        # Valid-for-type values must be rejected by "disallow".
        for value in valids:
            self.assertRaises(ValueError, validictory.validate, value,
                              {"disallow": typename})
class DateValidator(validictory.validator.SchemaValidator):
    """SchemaValidator extended with custom 'date' and 'datetime' types."""

    def validate_type_date(self, value):
        """True only for datetime.date instances (incl. datetime subclass)."""
        return isinstance(value, datetime.date)

    def validate_type_datetime(self, value):
        """True only for datetime.datetime instances."""
        return isinstance(value, datetime.datetime)
class TestCustomType(TestCase):
    """Exercise the custom date/datetime types added by DateValidator."""

    def test_date(self):
        """'date' accepts date objects and rejects ints/strings."""
        self._test_type('date', [datetime.date.today()],
                        [2010, '2010'])

    def test_datetime(self):
        """'datetime' accepts datetimes but not plain dates."""
        self._test_type('datetime', [datetime.datetime.now()],
                        [2010, '2010', datetime.date.today()])

    def test_either(self):
        """A list of custom type names acts as a union."""
        self._test_type(['datetime', 'date'],
                        [datetime.date.today(), datetime.datetime.now()],
                        [2010, '2010'])

    def _test_type(self, typename, valids, invalids):
        """Run valids/invalids through a DateValidator-backed schema."""
        validator = DateValidator()
        for x in valids:
            try:
                validator.validate(x, {"type": typename})
            except ValueError as e:
                self.fail("Unexpected failure: %s" % e)

        for x in invalids:
            self.assertRaises(ValueError, validator.validate, x,
                              {"type": typename})
| travelbird/validictory | validictory/tests/test_type.py | Python | mit | 4,940 |
from src.platform.jboss.interfaces import JMXInterface
class FPrint(JMXInterface):
    """Fingerprint for the JMX interface of JBoss 5.0."""

    def __init__(self):
        # Inherit JMX probing behavior; only pin the matched version string.
        super(FPrint, self).__init__()
        self.version = "5.0"
| GHubgenius/clusterd | src/platform/jboss/fingerprints/JBoss5JMX.py | Python | mit | 182 |
import unittest
from test import support
import collections, random, string
import collections.abc
import gc, weakref
import pickle
class DictTest(unittest.TestCase):
    def test_invalid_keyword_arguments(self):
        """dict(**m) and {}.update(**m) reject mappings with non-string keys."""
        class Custom(dict):
            pass
        for invalid in {1 : 2}, Custom({1 : 2}):
            with self.assertRaises(TypeError):
                dict(**invalid)
            with self.assertRaises(TypeError):
                {}.update(**invalid)
    def test_constructor(self):
        """dict() returns a fresh (not shared) empty dict."""
        # calling built-in types without argument must return empty
        self.assertEqual(dict(), {})
        self.assertIsNot(dict(), {})
    def test_literal_constructor(self):
        """Dict displays of several sizes evaluate equal to dict(items)."""
        # check literal constructor for different sized dicts
        # (to exercise the BUILD_MAP oparg).
        for n in (0, 1, 6, 256, 400):
            items = [(''.join(random.sample(string.ascii_letters, 8)), i)
                     for i in range(n)]
            random.shuffle(items)
            formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
            dictliteral = '{' + ', '.join(formatted_items) + '}'
            self.assertEqual(eval(dictliteral), dict(items))
    def test_bool(self):
        """Empty dicts are falsy; non-empty dicts are truthy."""
        self.assertIs(not {}, True)
        self.assertTrue({1: 2})
        self.assertIs(bool({}), False)
        self.assertIs(bool({1: 2}), True)
    def test_keys(self):
        """keys() view: contents, membership, arg checking, and repr."""
        d = {}
        self.assertEqual(set(d.keys()), set())
        d = {'a': 1, 'b': 2}
        k = d.keys()
        self.assertEqual(set(k), {'a', 'b'})
        self.assertIn('a', k)
        self.assertIn('b', k)
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertRaises(TypeError, d.keys, None)
        self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
    def test_values(self):
        """values() view: contents, arg checking, and repr."""
        d = {}
        self.assertEqual(set(d.values()), set())
        d = {1:2}
        self.assertEqual(set(d.values()), {2})
        self.assertRaises(TypeError, d.values, None)
        self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
    def test_items(self):
        """items() view: contents, arg checking, and repr."""
        d = {}
        self.assertEqual(set(d.items()), set())

        d = {1:2}
        self.assertEqual(set(d.items()), {(1, 2)})
        self.assertRaises(TypeError, d.items, None)
        self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
    def test_contains(self):
        """`in` / `not in` on dicts; __contains__ needs an argument."""
        d = {}
        self.assertNotIn('a', d)
        self.assertFalse('a' in d)
        self.assertTrue('a' not in d)
        d = {'a': 1, 'b': 2}
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertNotIn('c', d)

        self.assertRaises(TypeError, d.__contains__)
    def test_len(self):
        """len() reflects the number of entries."""
        d = {}
        self.assertEqual(len(d), 0)
        d = {'a': 1, 'b': 2}
        self.assertEqual(len(d), 2)
    def test_getitem(self):
        """Indexing, assignment, deletion, and error propagation from keys."""
        d = {'a': 1, 'b': 2}
        self.assertEqual(d['a'], 1)
        self.assertEqual(d['b'], 2)
        d['c'] = 3
        d['a'] = 4
        self.assertEqual(d['c'], 3)
        self.assertEqual(d['a'], 4)
        del d['b']
        self.assertEqual(d, {'a': 4, 'c': 3})

        self.assertRaises(TypeError, d.__getitem__)

        # __eq__ is never called while inserting/looking up distinct keys,
        # so a raising __eq__ does not disturb normal KeyError behavior.
        class BadEq(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 24

        d = {}
        d[BadEq()] = 42
        self.assertRaises(KeyError, d.__getitem__, 23)

        class Exc(Exception): pass

        # A raising __hash__ propagates out of the lookup.
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42

        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.__getitem__, x)
    def test_clear(self):
        """clear() empties the dict and rejects extra arguments."""
        d = {1:1, 2:2, 3:3}
        d.clear()
        self.assertEqual(d, {})

        self.assertRaises(TypeError, d.clear, None)
    def test_update(self):
        """update() with dicts, keys()-style mappings, and failing inputs."""
        d = {}
        d.update({1:100})
        d.update({2:20})
        d.update({1:1, 2:2, 3:3})
        self.assertEqual(d, {1:1, 2:2, 3:3})

        d.update()
        self.assertEqual(d, {1:1, 2:2, 3:3})

        self.assertRaises((TypeError, AttributeError), d.update, None)

        # Any object with keys()/__getitem__ is accepted as a mapping.
        class SimpleUserDict:
            def __init__(self):
                self.d = {1:1, 2:2, 3:3}
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        self.assertEqual(d, {1:1, 2:2, 3:3})

        class Exc(Exception): pass

        d.clear()

        # Exception raised directly by keys() propagates.
        class FailingUserDict:
            def keys(self):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())

        # Exception raised while iterating the keys propagates.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = 1
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i:
                            self.i = 0
                            return 'a'
                        raise Exc
                return BogonIter()
            def __getitem__(self, key):
                return key
        self.assertRaises(Exc, d.update, FailingUserDict())

        # Exception raised by __getitem__ during the copy propagates.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = ord('a')
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i <= ord('z'):
                            rtn = chr(self.i)
                            self.i += 1
                            return rtn
                        raise StopIteration
                return BogonIter()
            def __getitem__(self, key):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())

        # A failing iterable of pairs also propagates its exception.
        class badseq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()

        self.assertRaises(Exc, {}.update, badseq())

        self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
    def test_fromkeys(self):
        """fromkeys(): defaults, generators, subclasses, and error paths."""
        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        d = {}
        self.assertIsNot(d.fromkeys('abc'), d)
        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
        self.assertEqual(d.fromkeys([]), {})
        def g():
            yield 1
        self.assertEqual(d.fromkeys(g()), {1:None})
        self.assertRaises(TypeError, {}.fromkeys, 3)

        # fromkeys on a dict subclass returns an instance of that subclass.
        class dictlike(dict): pass
        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
        self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
        self.assertIsInstance(dictlike().fromkeys('a'), dictlike)

        # fromkeys honors __new__ even when it returns a non-dict mapping.
        class mydict(dict):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
        self.assertRaises(TypeError, dict.fromkeys)

        class Exc(Exception): pass

        # Exceptions from __init__, iteration, and __setitem__ propagate.
        class baddict1(dict):
            def __init__(self):
                raise Exc()

        self.assertRaises(Exc, baddict1.fromkeys, [1])

        class BadSeq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()

        self.assertRaises(Exc, dict.fromkeys, BadSeq())

        class baddict2(dict):
            def __setitem__(self, key, value):
                raise Exc()

        self.assertRaises(Exc, baddict2.fromkeys, [1])

        # test fast path for dictionary inputs
        d = dict(zip(range(6), range(6)))
        self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))

        # The fast path must not be taken when __new__ returns an arbitrary
        # pre-populated dict.
        class baddict3(dict):
            def __new__(cls):
                return d
        d = {i : i for i in range(10)}
        res = d.copy()
        res.update(a=None, b=None, c=None)
        self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
    def test_copy(self):
        """copy() returns an equal dict and rejects extra arguments."""
        d = {1:1, 2:2, 3:3}
        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
        self.assertEqual({}.copy(), {})
        self.assertRaises(TypeError, d.copy, None)
    def test_get(self):
        """get() with and without defaults, plus argument checking."""
        d = {}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        d = {'a': 1, 'b': 2}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('a', 3), 1)
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)
    def test_setdefault(self):
        """setdefault(): insertion of defaults and error propagation."""
        # dict.setdefault()
        d = {}
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key0', [])
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key', []).append(3)
        self.assertEqual(d['key'][0], 3)
        d.setdefault('key', []).append(4)
        self.assertEqual(len(d['key']), 2)
        self.assertRaises(TypeError, d.setdefault)

        class Exc(Exception): pass

        # A raising __hash__ propagates out of setdefault.
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42

        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.setdefault, x, [])
    def test_setdefault_atomic(self):
        """setdefault() calls __hash__ and __eq__ only once (issue #13521)."""
        # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        y = {hashed1: 5}
        hashed2 = Hashed()
        y.setdefault(hashed2, [])
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        # The colliding hashes (both 42) force exactly one __eq__ call.
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_setitem_atomic_at_resize(self):
        """An insertion that triggers a resize still hashes/compares once."""
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        # 5 items
        y = {hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3}
        hashed2 = Hashed()
        # 6th item forces a resize
        y[hashed2] = []
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_popitem(self):
        """popitem() drains dicts of many sizes; empty dict raises KeyError."""
        # dict.popitem()
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = {}
                b = {}
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    # Identically-built dicts must pop items in the same order.
                    self.assertFalse(copymode < 0 and ta != tb)
                self.assertFalse(a)
                self.assertFalse(b)

        d = {}
        self.assertRaises(KeyError, d.popitem)
    def test_pop(self):
        """pop() with/without default, missing keys, and raising __hash__."""
        # Tests for pop with specified key
        d = {}
        k, v = 'abc', 'def'
        d[k] = v
        self.assertRaises(KeyError, d.pop, 'ghi')

        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)

        self.assertRaises(KeyError, d.pop, k)

        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)

        self.assertRaises(TypeError, d.pop)

        class Exc(Exception): pass

        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42

        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
    def test_mutating_iteration(self):
        """Changing the dict's size while iterating raises RuntimeError."""
        # changing dict size during iteration
        d = {}
        d[1] = 1
        with self.assertRaises(RuntimeError):
            for i in d:
                d[i+1] = 1
    def test_mutating_lookup(self):
        """A __eq__ that mutates the dict mid-lookup is safe (issue #14417)."""
        # changing dict during a lookup (issue #14417)
        class NastyKey:
            mutate_dict = None

            def __init__(self, value):
                self.value = value

            def __hash__(self):
                # hash collision!
                return 1

            def __eq__(self, other):
                # Deletes an entry from the target dict the first time a
                # comparison happens, simulating concurrent mutation.
                if NastyKey.mutate_dict:
                    mydict, key = NastyKey.mutate_dict
                    NastyKey.mutate_dict = None
                    del mydict[key]
                return self.value == other.value

        key1 = NastyKey(1)
        key2 = NastyKey(2)
        d = {key1: 1}
        NastyKey.mutate_dict = (d, key1)
        d[key2] = 2
        self.assertEqual(d, {key2: 2})
    def test_repr(self):
        """repr() of plain, self-referential, and repr-raising dicts."""
        d = {}
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        d = {}
        d[1] = d
        # Self-reference is rendered as '...' instead of recursing forever.
        self.assertEqual(repr(d), '{1: {...}}')

        class Exc(Exception): pass

        class BadRepr(object):
            def __repr__(self):
                raise Exc()

        d = {1: BadRepr()}
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        """dict equality, including propagation of a raising value __eq__."""
        self.assertEqual({}, {})
        self.assertEqual({1: 2}, {1: 2})

        class Exc(Exception): pass

        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1

        d1 = {BadCmp(): 1}
        d2 = {1: 1}

        with self.assertRaises(Exc):
            d1 == d2
    def test_keys_contained(self):
        """Run the set-like comparison checks for both keys() and items()."""
        self.helper_keys_contained(lambda x: x.keys())
        self.helper_keys_contained(lambda x: x.items())
    def helper_keys_contained(self, fn):
        """Assert dict views obtained via `fn` compare like sets."""
        # Test rich comparisons against dict key views, which should behave the
        # same as sets.
        empty = fn(dict())
        empty2 = fn(dict())
        smaller = fn({1:1, 2:2})
        larger = fn({1:1, 2:2, 3:3})
        larger2 = fn({1:1, 2:2, 3:3})
        larger3 = fn({4:1, 2:2, 3:3})

        self.assertTrue(smaller <  larger)
        self.assertTrue(smaller <= larger)
        self.assertTrue(larger >  smaller)
        self.assertTrue(larger >= smaller)
        self.assertFalse(smaller >= larger)
        self.assertFalse(smaller >  larger)
        self.assertFalse(larger  <= smaller)
        self.assertFalse(larger  <  smaller)
        self.assertFalse(smaller <  larger3)
        self.assertFalse(smaller <= larger3)
        self.assertFalse(larger3 >  smaller)
        self.assertFalse(larger3 >= smaller)

        # Inequality strictness
        self.assertTrue(larger2 >= larger)
        self.assertTrue(larger2 <= larger)
        self.assertFalse(larger2 > larger)
        self.assertFalse(larger2 < larger)

        self.assertTrue(larger == larger2)
        self.assertTrue(smaller != larger)

        # There is an optimization on the zero-element case.
        self.assertTrue(empty == empty2)
        self.assertFalse(empty != empty2)
        self.assertFalse(empty == smaller)
        self.assertTrue(empty != smaller)

        # With the same size, an elementwise compare happens
        self.assertTrue(larger != larger3)
        self.assertFalse(larger == larger3)
    def test_errors_in_view_containment_check(self):
        """View comparisons propagate exceptions raised by value __eq__."""
        class C:
            def __eq__(self, other):
                raise RuntimeError

        d1 = {1: C()}
        d2 = {1: C()}
        with self.assertRaises(RuntimeError):
            d1.items() == d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() != d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() <= d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() >= d2.items()

        d3 = {1: C(), 2: C()}
        with self.assertRaises(RuntimeError):
            d2.items() < d3.items()
        with self.assertRaises(RuntimeError):
            d3.items() > d2.items()
    def test_dictview_set_operations_on_keys(self):
        """keys() views support -, &, |, ^ with set results."""
        k1 = {1:1, 2:2}.keys()
        k2 = {1:1, 2:2, 3:3}.keys()
        k3 = {4:4}.keys()

        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {1,2})
        self.assertEqual(k2 - k1, {3})
        self.assertEqual(k3 - k1, {4})
        self.assertEqual(k1 & k2, {1,2})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {1,2,3})
        self.assertEqual(k1 ^ k2, {3})
        self.assertEqual(k1 ^ k3, {1,2,4})
    def test_dictview_set_operations_on_items(self):
        """items() views support -, &, |, ^ with set-of-pairs results."""
        k1 = {1:1, 2:2}.items()
        k2 = {1:1, 2:2, 3:3}.items()
        k3 = {4:4}.items()

        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {(1,1), (2,2)})
        self.assertEqual(k2 - k1, {(3,3)})
        self.assertEqual(k3 - k1, {(4,4)})
        self.assertEqual(k1 & k2, {(1,1), (2,2)})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
        self.assertEqual(k1 ^ k2, {(3,3)})
        self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
    def test_dictview_mixed_set_operations(self):
        """Views compare and combine with plain sets on either side."""
        # Just a few for .keys()
        self.assertTrue({1:1}.keys() == {1})
        self.assertTrue({1} == {1:1}.keys())
        self.assertEqual({1:1}.keys() | {2}, {1, 2})
        self.assertEqual({2} | {1:1}.keys(), {1, 2})
        # And a few for .items()
        self.assertTrue({1:1}.items() == {(1,1)})
        self.assertTrue({(1,1)} == {1:1}.items())
        self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
        self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
def test_missing(self):
    """dict.__missing__ protocol: a *class*-level __missing__ on a dict
    subclass is invoked by d[key] for absent keys; instance attributes
    and plain dicts are unaffected."""
    # Make sure dict doesn't have a __missing__ method
    self.assertFalse(hasattr(dict, "__missing__"))
    self.assertFalse(hasattr({}, "__missing__"))
    # Test several cases:
    #   (D) subclass defines __missing__ method returning a value
    #   (E) subclass defines __missing__ method raising RuntimeError
    #   (F) subclass sets __missing__ instance variable (no effect)
    #   (G) subclass doesn't define __missing__ at all

    class D(dict):
        def __missing__(self, key):
            return 42
    d = D({1: 2, 3: 4})
    self.assertEqual(d[1], 2)
    self.assertEqual(d[3], 4)
    self.assertNotIn(2, d)
    self.assertNotIn(2, d.keys())
    self.assertEqual(d[2], 42)  # missing key routed to __missing__

    class E(dict):
        def __missing__(self, key):
            raise RuntimeError(key)
    e = E()
    with self.assertRaises(RuntimeError) as c:
        e[42]
    self.assertEqual(c.exception.args, (42,))

    class F(dict):
        def __init__(self):
            # An instance variable __missing__ should have no effect
            self.__missing__ = lambda key: None
    f = F()
    with self.assertRaises(KeyError) as c:
        f[42]
    self.assertEqual(c.exception.args, (42,))

    class G(dict):
        pass
    g = G()
    with self.assertRaises(KeyError) as c:
        g[42]
    self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
    # SF #1576657: the KeyError argument must be the tuple key itself,
    # not be mistaken for multiple exception arguments.
    d = {}
    with self.assertRaises(KeyError) as c:
        d[(1,)]
    self.assertEqual(c.exception.args, ((1,),))

def test_bad_key(self):
    # Dictionary lookups should fail if __eq__() raises an exception.
    class CustomException(Exception):
        pass

    class BadDictKey:
        def __hash__(self):
            # Constant hash: every BadDictKey collides, forcing an
            # __eq__ comparison against the stored key on each lookup.
            return hash(self.__class__)

        def __eq__(self, other):
            if isinstance(other, self.__class__):
                raise CustomException
            return other

    d = {}
    x1 = BadDictKey()
    x2 = BadDictKey()
    d[x1] = 1
    # Each statement performs a key comparison and must propagate
    # CustomException; exec() runs them with the locals above in scope.
    for stmt in ['d[x2] = 2',
                 'z = d[x2]',
                 'x2 in d',
                 'd.get(x2)',
                 'd.setdefault(x2, 42)',
                 'd.pop(x2)',
                 'd.update({x2: 2})']:
        with self.assertRaises(CustomException):
            exec(stmt, locals())
def test_resize1(self):
    # Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
    # This version got an assert failure in debug build, infinite loop in
    # release build.  Unfortunately, provoking this kind of stuff requires
    # a mix of inserts and deletes hitting exactly the right hash codes in
    # exactly the right order, and I can't think of a randomized approach
    # that would be *likely* to hit a failing case in reasonable time.
    d = {}
    for i in range(5):
        d[i] = i
    for i in range(5):
        del d[i]
    for i in range(5, 9):  # i==8 was the problem
        d[i] = i

def test_resize2(self):
    # Another dict resizing bug (SF bug #1456209).
    # This caused Segmentation faults or Illegal instructions.
    class X(object):
        def __hash__(self):
            # All instances collide into the same hash bucket.
            return 5
        def __eq__(self, other):
            # Once `resizing` is flipped on, clear the dict from inside
            # a key comparison happening during insertion.
            if resizing:
                d.clear()
            return False
    d = {}
    resizing = False
    d[X()] = 1
    d[X()] = 2
    d[X()] = 3
    d[X()] = 4
    d[X()] = 5
    # now trigger a resize
    resizing = True
    d[9] = 6

def test_empty_presized_dict_in_freelist(self):
    # Bug #3537: if an empty but presized dict with a size larger
    # than 7 was in the freelist, it triggered an assertion failure.
    # The dict display below is abandoned mid-construction by the
    # ZeroDivisionError, leaving a presized-but-empty dict behind.
    with self.assertRaises(ZeroDivisionError):
        d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
             'f': None, 'g': None, 'h': None}
    d = {}
def test_container_iterator(self):
    # Bug #3680: tp_traverse was not implemented for dictiter and
    # dictview objects, so reference cycles through them were never
    # collected.
    class C(object):
        pass
    views = (dict.items, dict.values, dict.keys)
    for v in views:
        obj = C()
        ref = weakref.ref(obj)
        # Build a cycle: obj -> view -> iterator -> dict -> obj.
        container = {obj: 1}
        obj.v = v(container)
        obj.x = iter(obj.v)
        del obj, container
        gc.collect()
        self.assertIs(ref(), None, "Cycle was not collected")

def _not_tracked(self, t):
    # Assert that t is (eventually) not GC-tracked.
    # Nested containers can take several collections to untrack.
    gc.collect()
    gc.collect()
    self.assertFalse(gc.is_tracked(t), t)

def _tracked(self, t):
    # Assert that t is GC-tracked and stays tracked across collections.
    self.assertTrue(gc.is_tracked(t), t)
    gc.collect()
    gc.collect()
    self.assertTrue(gc.is_tracked(t), t)
@support.cpython_only
def test_track_literals(self):
    # Test GC-optimization of dict literals: dicts that hold only
    # atomic (untracked) values need not be tracked themselves.
    x, y, z, w = 1.5, "a", (1, None), []

    self._not_tracked({})
    self._not_tracked({x: (), y: x, z: 1})
    self._not_tracked({1: "a", "b": 2})
    self._not_tracked({1: 2, (None, True, False, ()): int})
    self._not_tracked({1: object()})

    # Dicts with mutable elements are always tracked, even if those
    # elements are not tracked right now.
    self._tracked({1: []})
    self._tracked({1: ([],)})
    self._tracked({1: {}})
    self._tracked({1: set()})

@support.cpython_only
def test_track_dynamic(self):
    # Test GC-optimization of dynamically-created dicts through every
    # construction path: item assignment, copy(), fromkeys(), update(),
    # keyword arguments and sequences of pairs.
    class MyObject(object):
        pass
    x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()

    d = dict()
    self._not_tracked(d)
    d[1] = "a"
    self._not_tracked(d)
    d[y] = 2
    self._not_tracked(d)
    d[z] = 3
    self._not_tracked(d)
    self._not_tracked(d.copy())
    d[4] = w
    self._tracked(d)
    self._tracked(d.copy())
    d[4] = None
    self._not_tracked(d)
    self._not_tracked(d.copy())

    # dd isn't tracked right now, but it may mutate and therefore d
    # which contains it must be tracked.
    d = dict()
    dd = dict()
    d[1] = dd
    self._not_tracked(dd)
    self._tracked(d)
    dd[1] = d
    self._tracked(dd)

    d = dict.fromkeys([x, y, z])
    self._not_tracked(d)
    dd = dict()
    dd.update(d)
    self._not_tracked(dd)
    d = dict.fromkeys([x, y, z, o])
    self._tracked(d)
    dd = dict()
    dd.update(d)
    self._tracked(dd)

    d = dict(x=x, y=y, z=z)
    self._not_tracked(d)
    d = dict(x=x, y=y, z=z, w=w)
    self._tracked(d)
    d = dict()
    d.update(x=x, y=y, z=z)
    self._not_tracked(d)
    d.update(w=w)
    self._tracked(d)

    d = dict([(x, y), (z, 1)])
    self._not_tracked(d)
    d = dict([(x, y), (z, w)])
    self._tracked(d)
    d = dict()
    d.update([(x, y), (z, 1)])
    self._not_tracked(d)
    d.update([(x, y), (z, w)])
    self._tracked(d)

@support.cpython_only
def test_track_subtypes(self):
    # Dict subtypes are always tracked
    class MyDict(dict):
        pass
    self._tracked(MyDict())
def test_iterator_pickling(self):
    # A dict key iterator survives a pickle round trip at every
    # protocol, both fresh and after partial consumption.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        data = {1: "a", 2: "b", 3: "c"}
        it = iter(data)
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(sorted(it), sorted(data))

        it = pickle.loads(d)
        try:
            drop = next(it)
        except StopIteration:
            # Nothing left to consume; skip the partial-iteration check.
            continue
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        del data[drop]
        self.assertEqual(sorted(it), sorted(data))

def test_itemiterator_pickling(self):
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        data = {1: "a", 2: "b", 3: "c"}
        # dictviews aren't picklable, only their iterators
        itorg = iter(data.items())
        d = pickle.dumps(itorg, proto)
        it = pickle.loads(d)
        # note that the type of the unpickled iterator
        # is not necessarily the same as the original.  It is
        # merely an object supporting the iterator protocol, yielding
        # the same objects as the original one.
        # self.assertEqual(type(itorg), type(it))
        self.assertIsInstance(it, collections.abc.Iterator)
        self.assertEqual(dict(it), data)

        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        del data[drop[0]]
        self.assertEqual(dict(it), data)
def test_valuesiterator_pickling(self):
    """Round-trip a dict values iterator through pickle at every protocol.

    Mirrors test_itemiterator_pickling: values views are not picklable,
    only their iterators are.  Checks both a fresh iterator and a
    partially-consumed one.
    """
    # Bug fix: the loop previously used range(pickle.HIGHEST_PROTOCOL),
    # silently skipping the highest protocol that every sibling pickling
    # test in this class does exercise (they all use + 1).
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        data = {1: "a", 2: "b", 3: "c"}
        # data.values() isn't picklable, only its iterator
        it = iter(data.values())
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(sorted(list(it)), sorted(list(data.values())))

        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        values = list(it) + [drop]
        self.assertEqual(sorted(values), sorted(list(data.values())))
def test_instance_dict_getattr_str_subclass(self):
    """Attribute access and __dict__ indexing accept str-subclass keys."""
    class Holder:
        def __init__(self, msg):
            self.msg = msg

    class MyStr(str):
        pass

    obj = Holder('123')
    self.assertEqual(obj.msg, getattr(obj, MyStr('msg')))
    self.assertEqual(obj.msg, obj.__dict__[MyStr('msg')])

def test_object_set_item_single_instance_non_str_key(self):
    """An instance __dict__ accepts non-string keys alongside attributes."""
    class Holder:
        pass

    obj = Holder()
    obj.__dict__[1] = 1
    obj.a = 'a'
    self.assertEqual(obj.__dict__, {1: 1, 'a': 'a'})
def check_reentrant_insertion(self, mutate):
    # This object will trigger mutation of the dict when replaced
    # by another value.  Note this relies on refcounting: the test
    # won't achieve its purpose on fully-GCed Python implementations.
    class Mutating:
        def __del__(self):
            mutate(d)

    d = {k: Mutating() for k in 'abcdefghijklmnopqr'}
    # Each replacement drops the last reference to a Mutating value,
    # whose __del__ then mutates d in the middle of the store.
    for k in list(d):
        d[k] = k

def test_reentrant_insertion(self):
    # Reentrant insertion shouldn't crash (see issue #22653).
    # `mutate` is intentionally redefined for each scenario.
    def mutate(d):
        d['b'] = 5
    self.check_reentrant_insertion(mutate)

    def mutate(d):
        d.update(self.__dict__)
        d.clear()
    self.check_reentrant_insertion(mutate)

    def mutate(d):
        while d:
            d.popitem()
    self.check_reentrant_insertion(mutate)
from test import mapping_tests

# Run dict (and a trivial dict subclass) through the generic
# mapping-protocol conformance tests as well.
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    type2test = dict

class Dict(dict):
    pass

class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    type2test = Dict

def test_main():
    support.run_unittest(
        DictTest,
        GeneralMappingTests,
        SubclassMappingTests,
    )

if __name__ == "__main__":
    test_main()
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_dict.py | Python | gpl-2.0 | 30,334 |
asd = gwdata.asd(2, 1)
plot = asd.plot(figsize=(8, 6))
plot.add_frequencyseries(rayleigh, newax=True, sharex=plot.axes[0])
asdax, rayax = plot.axes
asdax.set_xlabel('')
asdax.set_xlim(30, 1500)
asdax.set_ylim(5e-24, 1e-21)
asdax.set_ylabel(r'[strain/\rtHz]')
rayax.set_ylim(0, 2)
rayax.set_ylabel('Rayleigh statistic')
asdax.set_title('Sensitivity of LIGO-Livingston around GW151226', fontsize=20)
plot.show() | gwpy/gwpy.github.io | docs/0.9.0/examples/frequencyseries/rayleigh-3.py | Python | gpl-3.0 | 409 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.textutils import find_words
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
            'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
            'search_language', 'guess_language' ]

log = logging.getLogger(__name__)

# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')

# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]

# One row per language: [alpha3-biblio, alpha3-term, alpha2, english, french]
language_matrix = [ l.strip().split('|')
                    for l in _iso639_contents.strip().split('\n') ]

# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
                    ['ass', '', '', 'Assyrian', 'assyrien']]

for lang in language_matrix:
    # remove unused languages that shadow other common ones with a non-official form
    if (lang[2] == 'se' or # Northern Sami shadows Swedish
        lang[2] == 'br'):  # Breton shadows Brazilian
        lang[2] = ''
    # add missing information
    if lang[0] == 'und':
        lang[2] = 'un'
    if lang[0] == 'srp':
        lang[1] = 'scc' # from OpenSubtitles

# Fast membership sets, one per representation of a language.
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
                        for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
                        for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name

# Conversion maps between the different code forms.
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])

lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])

# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
                           for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
                           for l in language_matrix if l[3]
                           for en_name in l[3].split('; '))

# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
                           for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
                           for l in language_matrix if l[4]
                           for fr_name in l[4].split('; '))

# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
                   'inconnu': ('und', None),
                   'unk': ('und', None),
                   'un': ('und', None),
                   'gr': ('gre', None),
                   'greek': ('gre', None),
                   'esp': ('spa', None),
                   'español': ('spa', None),
                   'se': ('swe', None),
                   'po': ('pt', 'br'),
                   'pb': ('pt', 'br'),
                   'pob': ('pt', 'br'),
                   'br': ('pt', 'br'),
                   'brazilian': ('pt', 'br'),
                   'català': ('cat', None),
                   'cz': ('cze', None),
                   'ua': ('ukr', None),
                   'cn': ('chi', None),
                   'chs': ('chi', None),
                   'jp': ('jpn', None),
                   'scr': ('hrv', None)
                   }
def is_iso_language(language):
    """True when *language* matches any known ISO-639 code or an
    English/French language name (case-insensitive)."""
    normalized = language.lower()
    return normalized in lng_all_names

def is_language(language):
    """Like is_iso_language(), but also accepts the non-ISO aliases
    listed in lng_exceptions (exact, case-sensitive match)."""
    if is_iso_language(language):
        return True
    return language in lng_exceptions
def lang_set(languages, strict=False):
    """Return a set of guessit.Language created from their given string
    representation.

    if strict is True, then this will raise an exception if any language
    could not be identified.
    """
    return {Language(name, strict=strict) for name in languages}
class Language(UnicodeMixin):
    """This class represents a human language.

    You can initialize it with pretty much anything, as it knows conversion
    from ISO-639 2-letter and 3-letter codes, English and French names.

    You can also distinguish languages for specific countries, such as
    Portuguese and Brazilian Portuguese.

    There are various properties on the language object that give you the
    representation of the language for a specific usage, such as .alpha3
    to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
    language code.

    >>> Language('fr')
    Language(French)

    >>> s(Language('eng').french_name)
    'anglais'

    >>> s(Language('pt(br)').country.english_name)
    'Brazil'

    >>> s(Language('Español (Latinoamérica)').country.english_name)
    'Latin America'

    >>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
    True

    >>> s(Language('zz', strict=False).english_name)
    'Undetermined'

    >>> s(Language('pt(br)').opensubtitles)
    'pob'
    """
    # Recognize "lang(country)" and "lang-country" spellings.
    _with_country_regexp = re.compile('(.*)\((.*)\)')
    _with_country_regexp2 = re.compile('(.*)-(.*)')

    def __init__(self, language, country=None, strict=False, scheme=None):
        language = u(language.strip().lower())
        with_country = (Language._with_country_regexp.match(language) or
                        Language._with_country_regexp2.match(language))
        if with_country:
            # Recurse on the bare language part, parse the country part.
            self.lang = Language(with_country.group(1)).lang
            self.country = Country(with_country.group(2))
            return

        self.lang = None
        self.country = Country(country) if country else None

        # first look for scheme specific languages
        if scheme == 'opensubtitles':
            if language == 'br':
                self.lang = 'bre'
                return
            elif language == 'se':
                self.lang = 'sme'
                return
        elif scheme is not None:
            log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)

        # look for ISO language codes
        if len(language) == 2:
            self.lang = lng2_to_lng3.get(language)
        elif len(language) == 3:
            self.lang = (language
                         if language in lng3
                         else lng3term_to_lng3.get(language))
        else:
            self.lang = (lng_en_name_to_lng3.get(language) or
                         lng_fr_name_to_lng3.get(language))

        # general language exceptions
        if self.lang is None and language in lng_exceptions:
            lang, country = lng_exceptions[language]
            self.lang = Language(lang).alpha3
            self.country = Country(country) if country else None

        msg = 'The given string "%s" could not be identified as a language' % language
        if self.lang is None and strict:
            raise ValueError(msg)

        if self.lang is None:
            log.debug(msg)
            # fall back to the ISO code for "undetermined"
            self.lang = 'und'

    @property
    def alpha2(self):
        # ISO-639-1 2-letter code; raises KeyError if none exists.
        return lng3_to_lng2[self.lang]

    @property
    def alpha3(self):
        # ISO-639-2 3-letter (bibliographic) code.
        return self.lang

    @property
    def alpha3term(self):
        # ISO-639-2 3-letter terminologic code.
        return lng3_to_lng3term[self.lang]

    @property
    def english_name(self):
        return lng3_to_lng_en_name[self.lang]

    @property
    def french_name(self):
        return lng3_to_lng_fr_name[self.lang]

    @property
    def opensubtitles(self):
        # OpenSubtitles uses a few non-standard codes ('pob' for
        # Brazilian Portuguese, terminologic codes for Greek/Serbian).
        if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
            return 'pob'
        elif self.lang in ['gre', 'srp']:
            return self.alpha3term
        return self.alpha3

    @property
    def tmdb(self):
        # TheMovieDB style: "ll-CC" when a country is known, else "ll".
        if self.country:
            return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
        return self.alpha2

    def __hash__(self):
        return hash(self.lang)

    def __eq__(self, other):
        # Compares only the language, not the country; also accepts
        # plain strings, which are parsed into a Language first.
        if isinstance(other, Language):
            return self.lang == other.lang

        if isinstance(other, base_text_type):
            try:
                return self == Language(other)
            except ValueError:
                return False

        return False

    def __ne__(self, other):
        return not self == other

    def __nonzero__(self):
        # Python 2 truth protocol: undetermined languages are falsy.
        return self.lang != 'und'

    def __unicode__(self):
        if self.country:
            return '%s(%s)' % (self.english_name, self.country.alpha2)
        else:
            return self.english_name

    def __repr__(self):
        if self.country:
            return 'Language(%s, country=%s)' % (self.english_name, self.country)
        else:
            return 'Language(%s)' % self.english_name
# Module-level singletons: the "undetermined" language and the set of
# all known languages (undetermined excluded).
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None):
    """Looks for language patterns, and if found return the language object,
    its group span and an associated confidence.

    you can specify a list of allowed languages using the lang_filter argument,
    as in lang_filter = [ 'fr', 'eng', 'spanish' ]

    >>> search_language('movie [en].avi')
    (Language(English), (7, 9), 0.8)

    >>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
    (None, None, None)
    """
    # list of common words which could be interpreted as languages, but which
    # are far too common to be able to say they represent a language in the
    # middle of a string (where they most likely carry their common meaning)
    lng_common_words = frozenset([
        # english words
        'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
        'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
        'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
        'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
        # french words
        'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
        'mal', 'est', 'vol', 'or', 'mon', 'se',
        # spanish words
        'la', 'el', 'del', 'por', 'mar',
        # other
        'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
        'vi', 'ben', 'da', 'lt'
        ])
    # characters accepted as word separators around a language token
    sep = r'[](){} \._-+'

    if lang_filter:
        lang_filter = lang_set(lang_filter)

    # pad with spaces so boundary checks at position 0/end are safe
    slow = ' %s ' % string.lower()
    confidence = 1.0 # for all of them

    for lang in set(find_words(slow)) & lng_all_names:

        if lang in lng_common_words:
            continue

        pos = slow.find(lang)

        if pos != -1:
            end = pos + len(lang)
            # make sure our word is always surrounded by separators
            # NOTE(review): only the *first* occurrence is checked; a
            # later, properly-delimited occurrence of the same word
            # would be missed -- confirm this is acceptable.
            if slow[pos - 1] not in sep or slow[end] not in sep:
                continue

            language = Language(slow[pos:end])
            if lang_filter and language not in lang_filter:
                continue

            # only allow those languages that have a 2-letter code, those that
            # don't are too esoteric and probably false matches
            if language.lang not in lng3_to_lng2:
                continue

            # confidence depends on lng2, lng3, english name, ...
            if len(lang) == 2:
                confidence = 0.8
            elif len(lang) == 3:
                confidence = 0.9
            else:
                # Note: we could either be really confident that we found a
                # language or assume that full language names are too
                # common words and lower their confidence accordingly
                confidence = 0.3 # going with the low-confidence route here

            # span shifted by -1 to compensate for the padding space
            return language, (pos - 1, end - 1), confidence

    return None, None, None
def guess_language(text):
    """Guess the language in which a body of text is written.

    Relies on the optional third-party guess-language module; returns
    Language(Undetermined) when it is not installed.
    """
    try:
        from guess_language import guessLanguage
    except ImportError:
        log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
        log.error('Please install it from PyPI, by doing eg: pip install guess-language')
        return UNDETERMINED
    return Language(guessLanguage(text))
| jerbob92/CouchPotatoServer | libs/guessit/language.py | Python | gpl-3.0 | 13,849 |
import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
# Make the shared util package importable when run from this directory.
sys.path.append('../../')
from util import readCSVToMatrix

# CSV of convergence statistics, given as the first CLI argument.
file_path = sys.argv[1]
file = open(file_path)  # NOTE(review): never closed; shadows the Py2 builtin `file`
def plotErrorRibbon(x, mu, std, metric, pp):
    """Plot the mean +/- one standard deviation as a shaded ribbon.

    Saves the figure both as 'convergence_plot_<metric>.svg' and as a
    page appended to *pp*.

    Args:
        x: sample counts (x-axis values).
        mu: mean of the metric at each x.
        std: standard deviation of the metric at each x.
        metric: metric name, used in the output file name.
        pp: an open matplotlib PdfPages object.
    """
    fig, ax = plt.subplots(1)
    ax.fill_between(x, mu+std, mu-std, facecolor='blue', alpha=0.5, label='$\pm \sigma$')
    # Bug fix: this previously plotted against the module-global
    # `data[:,0]` instead of the `x` parameter, so the function only
    # worked when the caller happened to pass data[:,0] as x.
    ax.plot(x, mu, alpha=0.7, label='$\mu$', linewidth=2.0)
    # ax.set_title(metric + ' convergence', fontsize=22)
    ax.legend(loc='upper right')
    ax.set_xlabel('number of scenario samples', fontsize=20)
    ax.set_ylabel('standard deviation $\mu$ $\pm \sigma$', fontsize=20)
    ax.tick_params(axis='both', which='major', labelsize=16)
    ax.tick_params(axis='both', which='minor', labelsize=14)
    plt.axis("tight")
    plt.savefig('convergence_plot_' + metric + '.svg', format='svg', dpi=1000, bbox_inches='tight')
    pp.savefig(fig, bbox_inches='tight')
    # plt.show()
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements below).  The CSV
    # columns appear to be ordered [samples, mu_cov, mu_time, mu_dist,
    # mu_effort, sd_cov, sd_time, sd_dist, sd_effort] -- confirm
    # against the producer of the data file.
    print file
    data = readCSVToMatrix(file, delimiter=',')
    print data

    pp = PdfPages("convergence" + '_plots.pdf')

    plotErrorRibbon(data[:,0], data[:,1], data[:,5], 'coverage', pp)
    plotErrorRibbon(data[:,0], data[:,2], data[:,6], 'time', pp)
    plotErrorRibbon(data[:,0], data[:,3], data[:,7], 'distance', pp)
    plotErrorRibbon(data[:,0], data[:,4], data[:,8], 'effort', pp)
    # plotData(pp, filename, parameter)
    pp.close()
| SteerSuite/steersuite-rutgers | steerstats/tools/plotting/errorRibbonPlotMulti.py | Python | gpl-3.0 | 1,412 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
class HrEmployee(models.Model):
    _inherit = 'hr.employee'

    def action_open_work_entries(self):
        """Return a window action listing this employee's work entries,
        pre-filtered and defaulted to the current employee."""
        self.ensure_one()
        action = {
            'type': 'ir.actions.act_window',
            'name': _('%s work entries', self.display_name),
            'view_mode': 'calendar,gantt,tree,form',
            'res_model': 'hr.work.entry',
            'context': {'default_employee_id': self.id},
            'domain': [('employee_id', '=', self.id)],
        }
        return action
| jeremiahyan/odoo | addons/hr_work_entry/models/hr_employee.py | Python | gpl-3.0 | 596 |
"""M2Crypto enhancement to Python's urllib for handling
'https' url's.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
import string, sys, urllib
from urllib import *
import SSL
import httpslib
# SSL method used when the caller supplies no context; 'sslv23'
# negotiates the highest protocol both sides support.
DEFAULT_PROTOCOL='sslv23'
def open_https(self, url, data=None, ssl_context=None):
    """URLopener handler for 'https' URLs, backed by M2Crypto SSL.

    Mirrors urllib.URLopener.open_http but drives the request through
    an M2Crypto HTTPSConnection.  If *ssl_context* is an SSL.Context it
    is used as-is, otherwise a fresh context with DEFAULT_PROTOCOL is
    created.  When *data* is given the request becomes a form-encoded
    POST.  (Python 2 code throughout.)
    """
    if ssl_context is not None and isinstance(ssl_context, SSL.Context):
        self.ctx = ssl_context
    else:
        self.ctx = SSL.Context(DEFAULT_PROTOCOL)
    user_passwd = None
    if type(url) is type(""):
        # Plain string URL: split off host and optional user:password.
        host, selector = splithost(url)
        if host:
            user_passwd, host = splituser(host)
            host = unquote(host)
        realhost = host
    else:
        # (host, selector) tuple form: used when going through a proxy.
        host, selector = url
        urltype, rest = splittype(selector)
        url = rest
        user_passwd = None
        if string.lower(urltype) != 'http':
            realhost = None
        else:
            realhost, rest = splithost(rest)
            if realhost:
                user_passwd, realhost = splituser(realhost)
            if user_passwd:
                selector = "%s://%s%s" % (urltype, realhost, rest)
        #print "proxy via http:", host, selector
    if not host: raise IOError, ('http error', 'no host given')
    if user_passwd:
        import base64
        auth = string.strip(base64.encodestring(user_passwd))
    else:
        auth = None
    # Start here!
    h = httpslib.HTTPSConnection(host=host, ssl_context=self.ctx)
    #h.set_debuglevel(1)
    # Stop here!
    if data is not None:
        h.putrequest('POST', selector)
        h.putheader('Content-type', 'application/x-www-form-urlencoded')
        h.putheader('Content-length', '%d' % len(data))
    else:
        h.putrequest('GET', selector)
    if auth: h.putheader('Authorization', 'Basic %s' % auth)
    for args in self.addheaders: apply(h.putheader, args)
    h.endheaders()
    if data is not None:
        h.send(data + '\r\n')
    # Here again!
    resp = h.getresponse()
    fp = resp.fp
    return urllib.addinfourl(fp, resp.msg, "https:" + url)
# Minor brain surgery: graft the SSL-aware handler onto urllib's
# URLopener so that plain urllib.urlopen() transparently supports
# https URLs through M2Crypto.
URLopener.open_https = open_https
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/M2Crypto/m2urllib.py | Python | agpl-3.0 | 2,124 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import ARM_BIN, ARM_INC, ARM_LIB, MY_ARM_CLIB, ARM_CPPLIB
from workspace_tools.hooks import hook_tool
from workspace_tools.settings import GOANNA_PATH
class ARM(mbedToolchain):
    """Base mbed toolchain driver for the ARM Compiler suite
    (armcc / armlink / armar / fromelf), specialized by the standard-
    library and microlib variants below."""
    LINKER_EXT = '.sct'
    LIBRARY_EXT = '.ar'

    STD_LIB_NAME = "%s.ar"
    # Matches armcc diagnostics such as:
    #   "file.c", line 10 (column 4): Warning: message
    DIAGNOSTIC_PATTERN = re.compile('"(?P<file>[^"]+)", line (?P<line>\d+)( \(column (?P<column>\d+)\)|): (?P<severity>Warning|Error): (?P<message>.+)')
    # Matches one "target: prerequisite" line of an armcc .d file.
    DEP_PATTERN = re.compile('\S+:\s(?P<file>.+)\n')

    def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
        mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)

        # Map mbed core names to armcc --cpu spellings.
        if target.core == "Cortex-M0+":
            cpu = "Cortex-M0"
        elif target.core == "Cortex-M4F":
            cpu = "Cortex-M4.fp"
        elif target.core == "Cortex-M7F":
            cpu = "Cortex-M7.fp.sp"
        else:
            cpu = target.core

        main_cc = join(ARM_BIN, "armcc")
        # Flags shared by assembler and both compilers.
        common = ["-c",
                  "--cpu=%s" % cpu, "--gnu",
                  "-Otime", "--split_sections", "--apcs=interwork",
                  "--brief_diagnostics", "--restrict", "--multibyte_chars"
                  ]

        if "save-asm" in self.options:
            common.extend(["--asm", "--interleave"])

        if "debug-info" in self.options:
            common.append("-g")
            common.append("-O0")
        else:
            common.append("-O3")

        # C/C++-only flags (dependency generation and include path).
        common_c = [
            "--md", "--no_depend_system_headers",
            '-I%s' % ARM_INC
        ]

        self.asm = [main_cc] + common + ['-I%s' % ARM_INC]
        # The "analyze" option routes compiles through the Goanna
        # static analyzer wrappers instead of calling armcc directly.
        if not "analyze" in self.options:
            self.cc = [main_cc] + common + common_c + ["--c99"]
            self.cppc = [main_cc] + common + common_c + ["--cpp", "--no_rtti"]
        else:
            self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--c99"]
            self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--cpp", "--no_rtti"]

        self.ld = [join(ARM_BIN, "armlink")]
        self.sys_libs = []

        self.ar = join(ARM_BIN, "armar")
        self.elf2bin = join(ARM_BIN, "fromelf")

    def remove_option(self, option):
        # Strip a flag from the assembler and both compiler command lines.
        for tool in [self.asm, self.cc, self.cppc]:
            if option in tool:
                tool.remove(option)

    def assemble(self, source, object, includes):
        # Preprocess first, then assemble
        tempfile = object + '.E.s'
        return [
            self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-E", "-o", tempfile, source],
            self.hook.get_cmdline_assembler(self.asm + ["-o", object, tempfile])
        ]

    def parse_dependencies(self, dep_path):
        # Collect prerequisite file names from an armcc dependency file.
        dependencies = []
        for line in open(dep_path).readlines():
            match = ARM.DEP_PATTERN.match(line)
            if match is not None:
                dependencies.append(match.group('file'))
        return dependencies

    def parse_output(self, output):
        # Forward compiler diagnostics (and Goanna findings) to the
        # notification machinery via cc_info().
        for line in output.splitlines():
            match = ARM.DIAGNOSTIC_PATTERN.match(line)
            if match is not None:
                self.cc_info(
                    match.group('severity').lower(),
                    match.group('file'),
                    match.group('line'),
                    match.group('message'),
                    target_name=self.target.name,
                    toolchain_name=self.name
                )
            match = self.goanna_parse_line(line)
            if match is not None:
                self.cc_info(
                    match.group('severity').lower(),
                    match.group('file'),
                    match.group('line'),
                    match.group('message')
                )

    def get_dep_opt(self, dep_path):
        # armcc flag that writes the dependency file to dep_path.
        return ["--depend", dep_path]

    def archive(self, objects, lib_path):
        self.default_cmd([self.ar, '-r', lib_path] + objects)

    def link(self, output, objects, libraries, lib_dirs, mem_map):
        if len(lib_dirs):
            args = ["-o", output, "--userlibpath", ",".join(lib_dirs), "--info=totals", "--list=.link_totals.txt"]
        else:
            args = ["-o", output, "--info=totals", "--list=.link_totals.txt"]

        if mem_map:
            # The scatter file drives the memory layout.
            args.extend(["--scatter", mem_map])

        # Targets may rewrite the link command line via a hook.
        if hasattr(self.target, "link_cmdline_hook"):
            args = self.target.link_cmdline_hook(self.__class__.__name__, args)

        self.default_cmd(self.ld + args + objects + libraries + self.sys_libs)

    @hook_tool
    def binary(self, resources, elf, bin):
        # Convert the linked ELF image to a raw binary with fromelf.
        args = [self.elf2bin, '--bin', '-o', bin, elf]

        if hasattr(self.target, "binary_cmdline_hook"):
            args = self.target.binary_cmdline_hook(self.__class__.__name__, args)

        self.default_cmd(args)
class ARM_STD(ARM):
    """ARM toolchain variant linked against the full standard library."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
        ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
        self.cc += ["-D__ASSERT_MSG"]
        self.cppc += ["-D__ASSERT_MSG"]
        self.ld.append("--libpath=%s" % ARM_LIB)
class ARM_MICRO(ARM):
    """ARM toolchain variant linked against microlib."""
    # Set to True when using the locally patched microlib (see below).
    PATCHED_LIBRARY = False

    def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
        ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)

        # Compiler
        self.asm  += ["-D__MICROLIB"]
        self.cc   += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
        self.cppc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]

        # Linker
        self.ld.append("--library_type=microlib")

        # We had to patch microlib to add C++ support
        # In later releases this patch should have entered mainline
        if ARM_MICRO.PATCHED_LIBRARY:
            self.ld.append("--noscanlib")

            # System Libraries: patched C/math libs plus the C++ runtime
            # matching the target core.
            self.sys_libs.extend([join(MY_ARM_CLIB, lib+".l") for lib in ["mc_p", "mf_p", "m_ps"]])

            if target.core == "Cortex-M3":
                self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ws", "cpprt_w"]])

            elif target.core in ["Cortex-M0", "Cortex-M0+"]:
                self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ps", "cpprt_p"]])
        else:
            self.ld.append("--libpath=%s" % ARM_LIB)
| nabilbendafi/mbed | workspace_tools/toolchains/arm.py | Python | apache-2.0 | 7,354 |
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdb
import re
## IMPORTANT NOTE:
#
# This file is a Python GDB script that is highly dependent on
# symbol names, even the internal functions and parameters.
#
# Whenever depending on a symbol, mark them in the source file
# so people know they have to adapt this file on changes.
## LOADING:
#
# This file should be auto-loaded by gdb if it is installed in GDB's
# auto-load directory and matches the installed libsoletta.so,
# including the final so-version.
#
# If soletta is installed to custom directory, then make sure GDB knows
# about this location and that the directory is marked as safe-path:
#
# (gdb) add-auto-load-scripts-directory ${soletta_prefix}/share/gdb/auto-load
# (gdb) add-auto-load-safe-path ${soletta_prefix}/share/gdb/auto-load
#
# It may be included directly if not auto-loaded:
#
# (gdb) source ${soletta_prefix}/share/gdb/auto-load/libsoletta.so-gdb.py
#
## Usage:
# commands start with the 'sol_' prefix, so you can use 'apropos ^sol_' to
# filter commands in our namespace, or use tab-completion.
# Use GDB's "help <command>" to get more information.
# Maps a port/option "data_type" string to the member name of the C
# 'defvalue' union that holds that type's default value.
defvalue_member_map = {
    "string": "s",
    "byte": "byte",
    "boolean": "b",
    "int": "i",
    "float": "f",
    "rgb": "rgb",
    "direction_vector": "direction_vector",
    }
def get_type_description(type):
    """Return the dereferenced 'description' field of a node type value.

    Returns None when the field is missing (KeyError) or NULL.
    """
    try:
        tdesc = type["description"]
    except KeyError:
        return None
    if not tdesc:
        return None
    return tdesc.dereference()
def get_node_type_description(node):
    """Return the type description of a 'struct sol_flow_node' value, or None."""
    type = node["type"]
    return get_type_description(type)
def _get_node_port_index_by_name(node, member, port_name):
    """Return the index of the port named *port_name* in the NULL-terminated
    description array *member* ("ports_in"/"ports_out"), or -1 if not found.
    """
    tdesc = get_node_type_description(node)
    if not tdesc:
        return -1
    array = tdesc[member]
    if not array:
        return -1
    index = 0
    while array[index]:
        entry = array[index]
        name = entry["name"]
        if name and name.string() == port_name:
            return index
        index += 1
    return -1
def get_node_port_out_index_by_name(node, port_name):
    # Index of the named output port, or -1 if unknown.
    return _get_node_port_index_by_name(node, "ports_out", port_name)
def get_node_port_in_index_by_name(node, port_name):
    # Index of the named input port, or -1 if unknown.
    return _get_node_port_index_by_name(node, "ports_in", port_name)
def _get_node_port_name_by_index(node, member, port_index):
    """Return the name of the port at *port_index* in the NULL-terminated
    description array *member*, or None when unknown or unnamed.
    """
    tdesc = get_node_type_description(node)
    if not tdesc:
        return None
    array = tdesc[member]
    if not array:
        return None
    i = 0
    while array[i]:
        if i == port_index:
            name = array[i]["name"]
            return name.string() if name else None
        if i > port_index:
            # Array is scanned in order; we went past the requested index.
            return None
        i += 1
    return None
def get_node_port_out_name_by_index(node, port_index):
    # Name of the output port at the given index, or None.
    return _get_node_port_name_by_index(node, "ports_out", port_index)
def get_node_port_in_name_by_index(node, port_index):
    # Name of the input port at the given index, or None.
    return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
    "Print a 'struct sol_flow_node_type'"
    def __init__(self, val):
        self.val = val
        # Cached 'const struct sol_flow_port_type_in *': only input port
        # types carry a process() member worth printing.
        self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()
    def display_hint(self):
        # Hint string consumed by gdb front-ends.
        return 'sol_flow_node_type'
    def _port_description_to_string(self, index, port, port_type):
        # Render one port: index, name, data type, description, plus any
        # connect/disconnect/process function pointers that are set.
        s = ("\n %d %s (%s)\n" \
            " description: %s\n") % (
            index,
            port["name"].string(),
            port["data_type"].string(),
            port["description"].string())
        if port_type["connect"]:
            s += " connect(): %s\n" % (port_type["connect"],)
        if port_type["disconnect"]:
            s += " disconnect(): %s\n" % (port_type["disconnect"],)
        if port_type.type == self.port_in_type and port_type["process"]:
            s += " process(): %s\n" % (port_type["process"],)
        return s
    def _option_description_to_string(self, option):
        # Render one option member; the default value lives in the
        # 'defvalue' union, selected via defvalue_member_map.
        data_type = option["data_type"].string()
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = option["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        return "\n %s(%s) \"%s\"%s," % (
            option["name"].string(),
            data_type,
            option["description"].string(),
            defvalue)
    def _ports_description_to_string(self, array, get_port_type):
        # 'array' is NULL-terminated; get_port_type(i) resolves the runtime
        # port type for index i.
        if not array:
            return ""
        i = 0
        r = []
        while array[i]:
            port_type = get_port_type(i)
            r.append(self._port_description_to_string(i, array[i], port_type))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)
    def _options_description_to_string(self, opts):
        # 'opts' points at the options description; 'members' is terminated
        # by an entry with a NULL name.
        if not opts:
            return ""
        opts = opts.dereference()
        array = opts["members"]
        if not array:
            return ""
        i = 0
        r = []
        while array[i]["name"]:
            r.append(self._option_description_to_string(array[i]))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)
    def to_string(self):
        type = self.val
        tdesc = get_type_description(type)
        if tdesc:
            # Port types are resolved by calling the inferior's accessor
            # functions with the type's address.
            get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
            get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
            p_type = type.address
            ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
            ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
            options = self._options_description_to_string(tdesc["options"])
            return "%s=%s" \
                "\n name=\"%s\"," \
                "\n category=\"%s\"," \
                "\n description=\"%s\"," \
                "\n ports_in={%s}," \
                "\n ports_out={%s}," \
                "\n options={%s})" % (
                tdesc["symbol"].string(),
                type.address,
                tdesc["name"].string(),
                tdesc["category"].string(),
                tdesc["description"].string(),
                ports_in,
                ports_out,
                options)
        return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
    "Print a 'struct sol_flow_node'"
    def __init__(self, val):
        self.val = val
    def display_hint(self):
        return 'sol_flow_node'
    def to_string(self):
        node = self.val
        node_type = node["type"]
        # A node without a type is still being constructed.
        if not node_type:
            return "sol_flow_node(%s) is under construction." % (node.address,)
        tdesc = get_type_description(node_type)
        if tdesc:
            tname = "%s(%s=%s)" % (
                tdesc["name"].string(),
                tdesc["symbol"].string(),
                node_type.address)
        else:
            tname = "%#x (no node type description)" % (node_type.address,)
        return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
            node.address, node["id"].string(), tname)
def sol_flow_pretty_printers(val):
    """Pretty-printer lookup: dispatch on the value's struct tag."""
    tag = val.type.tag
    if tag == "sol_flow_node":
        return FlowPrinter(val)
    if tag == "sol_flow_node_type":
        return FlowTypePrinter(val)
    return None
def register_pretty_printers(objfile):
    # NOTE(review): 'objfile' is unused; the lookup function is appended to
    # the global gdb.pretty_printers list, so it applies to every objfile.
    gdb.pretty_printers.append(sol_flow_pretty_printers)
def get_type_options_string(type, options):
    # Render the option values a node was opened with, annotated with each
    # option's declared data type, description and default value.
    if not options:
        return ""
    tdesc = get_type_description(type)
    if not tdesc or not tdesc["options"] or not tdesc["options"]["members"]:
        return "OPTIONS: %s (no node type description)\n" % (options,)
    string = ""
    opts_desc = tdesc["options"]
    array = opts_desc["members"]
    i = 0
    string += "OPTIONS: (struct %s*)%s\n" % (tdesc["options_symbol"].string(), options)
    # Cast the opaque options pointer to its concrete struct so members can
    # be read by name below.
    opt_type = gdb.lookup_type("struct %s" % (tdesc["options_symbol"].string(),))
    options = options.cast(opt_type.pointer())
    # 'members' is terminated by an entry with a NULL name.
    while array[i]["name"]:
        m = array[i]
        name = m["name"].string()
        data_type = m["data_type"].string()
        description = m["description"].string()
        value = options[name]
        if data_type == "string":
            if value:
                value = value.string()
            else:
                value = "NULL"
        # Default value lives in the 'defvalue' union, selected by type.
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = m["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        string += " %s (%s) = %s // %s%s\n" % (name, data_type, value, description, defvalue)
        i += 1
    string += "\n"
    return string
class InspectAndBreakIfMatches(gdb.Breakpoint):
    # Breakpoint on a well-known "inspection point" function. When it is
    # hit, get_values() (subclass hook) extracts the interesting values from
    # the current frame; if they satisfy the user-provided matches, a
    # temporary internal breakpoint is planted on the node's own method so
    # execution stops inside user-relevant code instead of the framework.
    class InternalBreak(gdb.Breakpoint):
        # Temporary breakpoint at the resolved method's address; prints an
        # optional banner when hit.
        def __init__(self, method, banner=None, matches=None, values=None):
            addr = "*%s" % (method.cast(gdb.lookup_type("long")),)
            self.method = method
            self.banner = banner
            self.matches = matches or {}
            self.values = values or {}
            gdb.Breakpoint.__init__(self, addr, gdb.BP_BREAKPOINT, internal=True, temporary=True)
        def stop(self):
            if self.banner:
                if callable(self.banner):
                    self.banner(self.matches, self.values)
                else:
                    gdb.write(self.banner)
            return True
    def __init__(self, spec, matches):
        gdb.Breakpoint.__init__(self, spec, gdb.BP_BREAKPOINT, internal=False)
        # Compile each user filter into a predicate (exact or /regexp/).
        self.matches = {}
        for k, v in matches.items():
            self.matches[k] = get_str_or_regexp_match(v)
    def print_matches(self, values=None):
        # Diagnostic dump of every filter and, when available, the value it
        # was tested against.
        gdb.write("%s matches:\n" % (self.__class__.__name__,), gdb.STDERR)
        if not values:
            values = {}
        for k, func in self.matches.items():
            v = values.get(k)
            if v is None:
                gdb.write(" %s = %s (no value provided)\n" % (k, func.__doc__), gdb.STDERR)
            else:
                try:
                    res = func(v)
                except Exception as e:
                    res = "Exception executing match: %s" % (e,)
                gdb.write(" %s = %s (value: '%s', match: %s)\n" %
                    (k, func.__doc__, v, res), gdb.STDERR)
        gdb.write("\n", gdb.STDERR)
    def get_values(self):
        # Subclass hook: return a dict with one entry per match key, plus
        # "method" (where to plant the internal break) and optional "banner".
        # BUGFIX: was 'raise NotImplemented()' — NotImplemented is not
        # callable in Python 3 and would raise a confusing TypeError.
        raise NotImplementedError()
    def stop(self):
        try:
            values = self.get_values()
        except Exception as e:
            gdb.write("Exception at %s.get_values(): %s\n" % (
                self.__class__.__name__, e), gdb.STDERR)
            return False
        if not values:
            gdb.write("%s.get_values() did not return values.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            return False
        def print_values():
            gdb.write("Values:\n", gdb.STDERR)
            for k, v in values.items():
                gdb.write(" %s: %s\n" % (k, v), gdb.STDERR)
            gdb.write("\n", gdb.STDERR)
        # Every user filter must be satisfied, otherwise let execution go on.
        for k, match_func in self.matches.items():
            try:
                v = values[k]
            except KeyError:
                gdb.write("%s.get_values() did not provide key '%s'.\n" % (
                    self.__class__.__name__, k), gdb.STDERR)
                self.print_matches(values)
                print_values()
                return False
            try:
                if not match_func(v):
                    return False
            except Exception as e:
                gdb.write("Exception at %s.stop() while matching %s %s (%s): %s\n" % (
                    self.__class__.__name__, k, v, match_func.__doc__, e,), gdb.STDERR)
                self.print_matches(values)
                return False
        method = values.get("method")
        banner = values.get("banner")
        if not method:
            # No target method to step into: stop right here instead.
            node = values.get("node")
            if node:
                gdb.write("NODE: %s\n" % (node,), gdb.STDERR)
            gdb.write("%s did not return the internal method to break at.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            self.print_matches(values)
            gdb.write("Breaking at the caller function %s\n" % (self.location,),
                gdb.STDERR)
            return True
        # Creating a breakpoint from within stop() is not allowed; defer it
        # to gdb's event loop.
        def add_breakpoint():
            try:
                self.InternalBreak(method, banner, self.matches, values)
            except Exception as e:
                gdb.write("Could not add internal breakpoint: %s\n" % (e,), gdb.STDERR)
                self.print_matches(values)
        gdb.post_event(add_breakpoint)
        return False
def get_str_or_regexp_match(string):
    """Build a predicate from *string*.

    A value enclosed in slashes ("/pattern/") yields a regexp match; any
    other value yields an exact string comparison. An empty/None value
    defaults to "/.*/" (match everything). The original spec is kept in
    the predicate's __doc__ for diagnostics.
    """
    if not string:
        string = "/.*/"
    is_regexp = len(string) > 2 and string.startswith("/") and string.endswith("/")
    if is_regexp:
        pattern = re.compile(string[1:-1])
        def match(x):
            return bool(pattern.match(x))
    else:
        def match(x):
            return string == x
    match.__doc__ = string
    return match
class FlowBreakOpen(InspectAndBreakIfMatches):
    # Break on node creation: hooks sol_flow_node_init() and plants the
    # internal break on the node type's open() method.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_init", matches)
    def get_values(self):
        # Evaluated in the frame of sol_flow_node_init(); the parameter
        # names ('name', 'type', 'node', 'options') must match the C source.
        node_id = gdb.parse_and_eval("name")
        if node_id:
            node_id = node_id.string()
        type = gdb.parse_and_eval("type")
        method = type["open"]
        node = gdb.parse_and_eval("*node")
        options = gdb.parse_and_eval("options")
        def banner(matches, values):
            gdb.write("""\
Break before opening node:
FUNCTION: %s
NODE....: %s (filter: %s)
%s""" % (method, node,
matches["node_id"].__doc__,
get_type_options_string(node["type"], options)))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
        }
class FlowBreakClose(InspectAndBreakIfMatches):
    # Break on node destruction: hooks sol_flow_node_fini() and plants the
    # internal break on the node type's close() method.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_fini", matches)
    def get_values(self):
        # Evaluated in the frame of sol_flow_node_fini(); 'node' must match
        # the C parameter name.
        node = gdb.parse_and_eval("*node")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        type = node["type"]
        method = type["close"]
        def banner(matches, values):
            gdb.write("""\
Break before closing node:
FUNCTION: %s
NODE....: %s (filter: %s)
""" % (method, node,
matches["node_id"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
        }
class FlowBreakSend(InspectAndBreakIfMatches):
    # Break before a packet is sent on an output port: hooks the
    # inspector_will_send_packet() instrumentation point.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "inspector_will_send_packet", matches)
    def get_values(self):
        # Evaluated in the frame of inspector_will_send_packet(); parameter
        # names ('src_node', 'src_port', 'packet') must match the C source.
        node = gdb.parse_and_eval("*src_node")
        port = gdb.parse_and_eval("src_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_out_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # The send() implementation lives in the parent container type.
        type = gdb.parse_and_eval("(struct sol_flow_node_container_type *)src_node->parent->type")
        method = type["send"]
        def banner(matches, values):
            gdb.write("""\
Break before sending packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
method,
node,
matches["node_id"].__doc__,
port_name,
port,
matches["port_name"].__doc__,
packet,
matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
        }
class FlowBreakProcess(InspectAndBreakIfMatches):
    # Break before a packet is delivered to an input port: hooks the
    # inspector_will_deliver_packet() instrumentation point.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "inspector_will_deliver_packet", matches)
    def get_values(self):
        # Evaluated in the frame of inspector_will_deliver_packet();
        # parameter names ('dst_node', 'dst_port', 'packet') must match the
        # C source.
        node = gdb.parse_and_eval("*dst_node")
        port = gdb.parse_and_eval("dst_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_in_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # Resolve the input port's process() via the inferior's accessor.
        get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        type = node["type"]
        port_type = get_port_in(type, port)
        if not port_type:
            method = None
        else:
            method = port_type["process"]
        def banner(matches, values):
            gdb.write("""\
Break before processing packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
method,
node,
matches["node_id"].__doc__,
port_name,
port,
matches["port_name"].__doc__,
packet,
matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
        }
class FlowCommand(gdb.Command):
    "Commands to operate with 'sol_flow'"
    def __init__(self):
        # Prefix command: concrete behavior lives in the "sol_flow break"
        # and "sol_flow print" sub-commands registered below.
        gdb.Command.__init__(self, "sol_flow", gdb.COMMAND_USER, gdb.COMPLETE_COMMAND, True)
    def invoke(self, arg, from_tty):
        # Reached only when no sub-command was given.
        raise gdb.GdbError("missing sub-command: break or print")
class FlowBreakCommand(gdb.Command):
    "Add an execution break when sol_flow events happen."
    def __init__(self):
        # Prefix for the "sol_flow break <event>" sub-commands.
        gdb.Command.__init__(self, "sol_flow break", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
    def invoke(self, arg, from_tty):
        # Reached only when no sub-command was given.
        raise gdb.GdbError("missing sub-command: open, close, send or process")
class FlowBreakFilterBaseCommand(gdb.Command):
    """Base command for 'sol_flow break' subcommands.
    The subcommand will be registered and will take matches as list of
    optional arguments. If not available then None is assumed. These
    parameters will be sent to breakpoint in order.
    """
    def __init__(self, subcommand, matches, breakpoint):
        gdb.Command.__init__(self, "sol_flow break " + subcommand, gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
        self.matches = matches
        self.breakpoint = breakpoint
    def invoke(self, arg, from_tty):
        # Positional CLI arguments map one-to-one onto the declared match
        # names; missing trailing arguments become None (match anything).
        argv = gdb.string_to_argv(arg)
        params = {}
        for position, name in enumerate(self.matches):
            params[name] = argv[position] if position < len(argv) else None
        self.breakpoint(params)
        self.dont_repeat()
class FlowBreakOpenCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is created (type->open).
    Arguments: node_id
    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:
    sol_flow break open timer
    will break on nodes with id "timer" (exact match)
    sol_flow break open /^timer.*$/
    will break on nodes with id that matches regular expression
    "^timer.*$" (starts with "timer")
    """
    def __init__(self):
        # Single optional filter: the node id.
        FlowBreakFilterBaseCommand.__init__(self, "open", ["node_id"], FlowBreakOpen)
class FlowBreakCloseCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is destroyed (type->close).
    Arguments: node_id
    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:
    sol_flow break close timer
    will break on nodes with id "timer" (exact match)
    sol_flow break close /^timer.*$/
    will break on nodes with id that matches regular expression
    "^timer.*$" (starts with "timer")
    """
    def __init__(self):
        # Single optional filter: the node id.
        FlowBreakFilterBaseCommand.__init__(self, "close", ["node_id"], FlowBreakClose)
class FlowBreakSendCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node sends a packet on its output port.
    Arguments: node_id port_name packet_type
    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """
    def __init__(self):
        # Three optional positional filters, applied in order.
        FlowBreakFilterBaseCommand.__init__(self, "send", ["node_id", "port_name", "packet_type"], FlowBreakSend)
class FlowBreakProcessCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node will receive a packet on its input port (port's process()).
    Arguments: node_id port_name packet_type
    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """
    def __init__(self):
        # Three optional positional filters, applied in order.
        FlowBreakFilterBaseCommand.__init__(self, "process", ["node_id", "port_name", "packet_type"], FlowBreakProcess)
class FlowPrintCommand(gdb.Command):
    "Print sol_flow types"
    def __init__(self):
        # Prefix for the "sol_flow print <what>" sub-commands.
        gdb.Command.__init__(self, "sol_flow print", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_COMMAND, True)
    def invoke(self, arg, from_tty):
        # Reached only when no sub-command was given.
        raise gdb.GdbError("missing sub-command: type, port or options")
def get_node_type_from_exp(arg):
    """Evaluate *arg* and return the 'struct sol_flow_node_type' it denotes.

    Accepts either a node (value, pointer or const pointer) — in which
    case its 'type' member is returned — or a node type directly.
    Raises gdb.GdbError for anything else.
    """
    node = gdb.parse_and_eval(arg)
    if not node:
        raise gdb.GdbError("invalid node: %s" % (arg,))
    gt = node.type.unqualified()
    node_struct = gdb.lookup_type("struct sol_flow_node")
    type_struct = gdb.lookup_type("struct sol_flow_node_type")
    if gt in (node_struct, node_struct.pointer(), node_struct.const().pointer()):
        return node["type"]
    if gt in (type_struct, type_struct.pointer(), type_struct.const().pointer()):
        return node
    raise gdb.GdbError("invalid node: %s" % (arg,))
class FlowPrintTypeCommand(gdb.Command):
    """Prints the type information for the given 'struct sol_flow_node'.
    Arguments: node
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print type", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        type = get_node_type_from_exp(arg[0])
        # Dereferencing lets the FlowTypePrinter render the full description.
        gdb.write("%s\n" % (type.dereference(),))
class FlowPrintPortCommand(gdb.Command):
    """Prints the port information for the given node.
    Arguments: node [direction] [filter_type] [filter_specifier]
    node is the pointer to node where to find the port.
    direction may be 'in', 'out' or 'both'. If omitted, both will be
    assumed. May be omitted and 'both' is used.
    filter_type may be 'all', 'number' or 'name'. If omitted, all
    will be assumed.
    If filter_type is 'number', then filter_specifier must be an integer.
    If filter_type is 'name', then filter_specifier must be a string
    or a regular expression enclosed in "//".
    If filter_type is omitted, then it's guessed from filter_specifier.
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print port", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
    def _print_ports(self, type, tdesc, member, filter):
        # Walk the NULL-terminated port description array 'member' and print
        # every port accepted by 'filter' ({"type": "all"|"number"|"name"}).
        array = tdesc[member]
        if not array:
            return
        did = 0
        i = 0
        if member == "ports_in":
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        else:
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
        while array[i]:
            port = array[i]
            if filter["type"] == "all" or \
               (filter["type"] == "number" and filter["number"] == i) or \
               (filter["type"] == "name" and filter["name"](port["name"].string())):
                # Print the section header only once, on the first match.
                if did == 0:
                    gdb.write("%s:\n" % member)
                did += 1
                gdb.write(" %d: %s (%s)\n description: %s\n" % (
                    i,
                    port["name"].string(),
                    port["data_type"].string(),
                    port["description"].string(),
                    ))
                port_type = get_port_type(type, i)
                if port_type["connect"]:
                    gdb.write(" connect(): %s\n" % (port_type["connect"],))
                if port_type["disconnect"]:
                    gdb.write(" disconnect(): %s\n" % (port_type["disconnect"],))
                if member == "ports_in" and port_type["process"]:
                    gdb.write(" process(): %s\n" % (port_type["process"],))
                gdb.write("\n")
            i += 1
    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        direction = "both"
        filter = {"type": "all"}
        if len(arg) > 1:
            # arg[1] is either a direction or, when not one, a filter
            # specifier whose kind (number/name) is guessed below.
            direction = arg[1]
            if direction not in ("both", "in", "out"):
                direction = "both"
                try:
                    filter["number"] = int(arg[1])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[1])
                    filter["type"] = "name"
        if len(arg) > 2:
            # Explicit filter type; anything else is treated as a bare
            # filter specifier and its kind is guessed.
            filter["type"] = arg[2]
            if filter["type"] not in ("all", "number", "name"):
                try:
                    filter["number"] = int(arg[2])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[2])
                    filter["type"] = "name"
            elif filter["type"] == 'number':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port number to filter")
                filter["number"] = int(arg[3])
            elif filter["type"] == 'name':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port name to filter")
                filter["name"] = get_str_or_regexp_match(arg[3])
        type = get_node_type_from_exp(arg[0])
        tdesc = get_type_description(type)
        if not tdesc:
            gdb.write("no node type description\n")
            return
        if direction == "both" or direction == "in":
            self._print_ports(type, tdesc, "ports_in", filter)
        if direction == "both" or direction == "out":
            self._print_ports(type, tdesc, "ports_out", filter)
class FlowPrintOptionsCommand(gdb.Command):
    """Prints the options used to open the given node.
    Arguments: node options
    node is the pointer to node where to find the port.
    options is the pointer to options to open to given node.
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print options", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) != 2:
            raise gdb.GdbError("Usage: sol_flow print options <node> <options>")
        # The node expression supplies the type used to decode 'options'.
        type = get_node_type_from_exp(arg[0])
        options = gdb.parse_and_eval(arg[1])
        gdb.write(get_type_options_string(type, options))
# Instantiating each command registers it with gdb; done once when this
# script is sourced/auto-loaded, together with the pretty-printers.
FlowCommand()
FlowBreakCommand()
FlowBreakOpenCommand()
FlowBreakCloseCommand()
FlowBreakSendCommand()
FlowBreakProcessCommand()
FlowPrintCommand()
FlowPrintTypeCommand()
FlowPrintPortCommand()
FlowPrintOptionsCommand()
register_pretty_printers(gdb.current_objfile())
| tripzero/soletta | data/gdb/libsoletta.so-gdb.py | Python | apache-2.0 | 29,075 |
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.http import HttpRequest, HttpResponse
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
    """
    Add 6 messages from different levels (including a custom one) to a storage
    instance.
    """
    # NOTE: the insertion order and levels here are asserted verbatim by
    # test_tags/test_level_tag and the *_level tests below.
    storage.add(constants.INFO, 'A generic info message')
    storage.add(29, 'Some custom level')
    storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
    storage.add(constants.WARNING, 'A warning')
    storage.add(constants.ERROR, 'An error')
    storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
    """override_settings variant that also refreshes the cached LEVEL_TAGS."""
    def enable(self):
        super().enable()
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, update that constant also.
        self.old_level_tags = base.LEVEL_TAGS
        base.LEVEL_TAGS = utils.get_level_tags()
    def disable(self):
        super().disable()
        # Restore the constant saved by enable().
        base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
    """Mixin of tests shared by all message storage backends.

    Concrete TestCase subclasses set ``storage_class`` and implement the
    abstract helpers (``stored_messages_count``, ``test_get``).
    """
    storage_class = default_storage
    # Shorthand mapping used when posting to the add_message views.
    levels = {
        'debug': constants.DEBUG,
        'info': constants.INFO,
        'success': constants.SUCCESS,
        'warning': constants.WARNING,
        'error': constants.ERROR,
    }
    def setUp(self):
        # Point MESSAGE_STORAGE at the backend under test and install the
        # template/URL configuration the message views rely on.
        self.settings_override = override_settings_tags(
            TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': (
                        'django.contrib.auth.context_processors.auth',
                        'django.contrib.messages.context_processors.messages',
                    ),
                },
            }],
            ROOT_URLCONF='messages_tests.urls',
            MESSAGE_TAGS={},
            MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
            SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
        )
        self.settings_override.enable()
    def tearDown(self):
        # Undo the settings patched in setUp().
        self.settings_override.disable()
    def get_request(self):
        # Bare request, with no session or middleware attached.
        return HttpRequest()
    def get_response(self):
        # Bare response for storage.update() calls.
        return HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
    def test_repr(self):
        # repr() must name the concrete backend class and the request.
        request = self.get_request()
        storage = self.storage_class(request)
        self.assertEqual(
            repr(storage),
            f'<{self.storage_class.__qualname__}: request=<HttpRequest>>',
        )
    def test_add(self):
        # added_new flips as soon as the first message is stored.
        storage = self.get_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 1')
        self.assertTrue(storage.added_new)
        storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
        self.assertEqual(len(storage), 2)
    def test_add_lazy_translation(self):
        # Lazy translation objects must survive storage serialization.
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, gettext_lazy('lazy message'))
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    def test_no_update(self):
        # Updating an empty, untouched storage stores nothing.
        storage = self.get_storage()
        response = self.get_response()
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_add_update(self):
        # Unread messages are persisted by update().
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 1')
        storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 2)
    def test_existing_add_read_update(self):
        # Once all messages (including the new one) are read, update()
        # stores nothing.
        storage = self.get_existing_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 3')
        list(storage)   # Simulates a read
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_existing_read_add_update(self):
        # A message added after the read is still unread, so it is stored.
        storage = self.get_existing_storage()
        response = self.get_response()
        list(storage)   # Simulates a read
        storage.add(constants.INFO, 'Test message 3')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_full_request_response_cycle(self):
        """
        With the message middleware enabled, messages are properly stored and
        retrieved across the full request/redirect/response cycle.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        # Exercise every standard level; MESSAGE_LEVEL=DEBUG ensures none
        # of them is filtered out.
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            messages = [Message(self.levels[level], msg) for msg in data['messages']]
            self.assertEqual(list(response.context['messages']), messages)
            for msg in data['messages']:
                self.assertContains(response, msg)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_with_template_response(self):
        # Same cycle as above, but through TemplateResponse-based views.
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_template_response')
        for level in self.levels:
            add_url = reverse('add_template_response', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            for msg in data['messages']:
                self.assertContains(response, msg)
            # there shouldn't be any messages on second GET request
            response = self.client.get(show_url)
            for msg in data['messages']:
                self.assertNotContains(response, msg)
    def test_context_processor_message_levels(self):
        # The messages context processor exposes the DEFAULT_LEVELS mapping.
        show_url = reverse('show_template_response')
        response = self.client.get(show_url)
        self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
        self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_multiple_posts(self):
        """
        Messages persist properly when multiple POSTs are made before a GET.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        messages = []
        # Accumulate the expected messages across all POSTs; a single GET
        # must then deliver every one of them.
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
            add_url = reverse('add_message', args=(level,))
            self.client.post(add_url, data)
        response = self.client.get(show_url)
        self.assertIn('messages', response.context)
        self.assertEqual(list(response.context['messages']), messages)
        for msg in data['messages']:
            self.assertContains(response, msg)
    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
    )
    @override_settings(
        MESSAGE_LEVEL=constants.DEBUG,
        TEMPLATES=[{
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
        }],
    )
    def test_middleware_disabled(self):
        """
        When the middleware is disabled, an exception is raised when one
        attempts to store a message.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        # NOTE(review): the return value is discarded; presumably this only
        # checks that the URL still resolves without the messages app.
        reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            with self.assertRaises(MessageFailure):
                self.client.post(add_url, data, follow=True)
    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
    )
    @override_settings(
        TEMPLATES=[{
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
        }],
    )
    def test_middleware_disabled_fail_silently(self):
        """
        When the middleware is disabled, an exception is not raised
        if 'fail_silently' is True.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
            'fail_silently': True,
        }
        show_url = reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            # Messages were dropped silently, so none reach the context.
            self.assertNotIn('messages', response.context)
    def stored_messages_count(self, storage, response):
        """
        Return the number of messages being stored after a
        ``storage.update()`` call.
        """
        # Abstract hook: each backend counts its persisted messages
        # differently (cookie, session, ...).
        raise NotImplementedError('This method must be set by a subclass.')
    def test_get(self):
        # Abstract hook: backend-specific retrieval test.
        raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
    def test_existing_read(self):
        """
        Reading the existing storage doesn't cause the data to be lost.
        """
        storage = self.get_existing_storage()
        self.assertFalse(storage.used)
        # After iterating the storage engine directly, the used flag is set.
        data = list(storage)
        self.assertTrue(storage.used)
        # The data does not disappear because it has been iterated.
        self.assertEqual(data, list(storage))
    def test_existing_add(self):
        # Adding to a preloaded storage still flips added_new.
        storage = self.get_existing_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 3')
        self.assertTrue(storage.added_new)
def test_default_level(self):
    # get_level works even with no storage on the request.
    request = self.get_request()
    self.assertEqual(get_level(request), constants.INFO)
    # get_level returns the default level if it hasn't been set.
    storage = self.get_storage()
    request._messages = storage
    self.assertEqual(get_level(request), constants.INFO)
    # Only messages of sufficient level get recorded.
    add_level_messages(storage)
    # Presumably add_level_messages adds six messages and one falls below
    # INFO — confirm against the helper's definition.
    self.assertEqual(len(storage), 5)
def test_low_level(self):
    # With the recording threshold lowered to 5, every message produced by
    # add_level_messages is kept.
    request = self.get_request()
    storage = self.storage_class(request)
    request._messages = storage
    self.assertTrue(set_level(request, 5))
    self.assertEqual(get_level(request), 5)
    add_level_messages(storage)
    self.assertEqual(len(storage), 6)
def test_high_level(self):
    # With the threshold raised to 30, only the highest-level messages
    # survive filtering.
    request = self.get_request()
    storage = self.storage_class(request)
    request._messages = storage
    self.assertTrue(set_level(request, 30))
    self.assertEqual(get_level(request), 30)
    add_level_messages(storage)
    self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
    # The MESSAGE_LEVEL setting is honoured without an explicit set_level()
    # call on the request.
    request = self.get_request()
    storage = self.storage_class(request)
    self.assertEqual(get_level(request), 29)
    add_level_messages(storage)
    self.assertEqual(len(storage), 3)
def test_tags(self):
    # Level 0 records everything, so every tag combination can be checked.
    storage = self.get_storage()
    storage.level = 0
    add_level_messages(storage)
    # extra_tags=None must not break tag rendering (yields plain 'info').
    storage.add(constants.INFO, 'A generic info message', extra_tags=None)
    tags = [msg.tags for msg in storage]
    self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success', 'info'])
def test_level_tag(self):
    # level_tag reflects only the message level; compare with test_tags,
    # where extra_tags are prepended to the same values.
    storage = self.get_storage()
    storage.level = 0
    add_level_messages(storage)
    tags = [msg.level_tag for msg in storage]
    self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
    constants.INFO: 'info',
    constants.DEBUG: '',
    constants.WARNING: '',
    constants.ERROR: 'bad',
    29: 'custom',
})
def test_custom_tags(self):
    # MESSAGE_TAGS overrides the per-level tag strings; extra_tags are
    # still prepended (hence 'extra-tag' for the debug message, whose
    # level tag is overridden to '').
    storage = self.get_storage()
    storage.level = 0
    add_level_messages(storage)
    tags = [msg.tags for msg in storage]
    self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| ar4s/django | tests/messages_tests/base.py | Python | bsd-3-clause | 14,187 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import sys
import unittest
import gyp.generator.ninja as ninja
class TestPrefixesAndSuffixes(unittest.TestCase):
    """Checks the platform-specific binary names produced by NinjaWriter."""

    @staticmethod
    def _make_writer(flavor):
        """Build a NinjaWriter targeting the given platform flavor."""
        return ninja.NinjaWriter(
            "foo", "wee", ".", ".", "build.ninja", ".", "build.ninja", flavor
        )

    def test_BinaryNamesWindows(self):
        # These cannot run on non-Windows as they require a VS installation to
        # correctly handle variable expansion.
        if not sys.platform.startswith("win"):
            return
        writer = self._make_writer("win")
        spec = {"target_name": "wee"}
        exe_name = writer.ComputeOutputFileName(spec, "executable")
        dll_name = writer.ComputeOutputFileName(spec, "shared_library")
        lib_name = writer.ComputeOutputFileName(spec, "static_library")
        self.assertTrue(exe_name.endswith(".exe"))
        self.assertTrue(dll_name.endswith(".dll"))
        self.assertTrue(lib_name.endswith(".lib"))

    def test_BinaryNamesLinux(self):
        writer = self._make_writer("linux")
        spec = {"target_name": "wee"}
        exe_name = writer.ComputeOutputFileName(spec, "executable")
        so_name = writer.ComputeOutputFileName(spec, "shared_library")
        a_name = writer.ComputeOutputFileName(spec, "static_library")
        # Executables carry no extension; libraries get the lib prefix and
        # the conventional .so / .a suffixes.
        self.assertTrue("." not in exe_name)
        self.assertTrue(so_name.startswith("lib"))
        self.assertTrue(a_name.startswith("lib"))
        self.assertTrue(so_name.endswith(".so"))
        self.assertTrue(a_name.endswith(".a"))
if __name__ == "__main__":
    # Allow running this QA module directly with the stock unittest runner.
    unittest.main()
| onecoolx/picasso | tools/gyp/pylib/gyp/generator/ninja_test.py | Python | bsd-3-clause | 1,909 |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.
import warnings
from . import _eigen
__all__ = [ # noqa: F822
'ArpackError', 'ArpackNoConvergence',
'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test'
]
eigen_modules = ['arpack']
def __dir__():
    # Advertise only the deprecated public names for introspection/completion.
    return __all__
def __getattr__(name):
    """Deprecation shim: forward known names to ``_eigen`` with a warning.

    Unknown names raise AttributeError; deprecated submodules and plain
    names get differently-worded DeprecationWarnings before delegation.
    """
    known = name in __all__ or name in eigen_modules
    if not known:
        raise AttributeError(
            "scipy.sparse.linalg.eigen is deprecated and has no attribute "
            f"{name}. Try looking in scipy.sparse.linalg instead.")

    if name in eigen_modules:
        msg = (f'The module `scipy.sparse.linalg.eigen.{name}` is '
               'deprecated. All public names must be imported directly from '
               'the `scipy.sparse.linalg` namespace.')
    else:
        msg = (f"Please use `{name}` from the `scipy.sparse.linalg` namespace,"
               " the `scipy.sparse.linalg.eigen` namespace is deprecated.")

    warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    return getattr(_eigen, name)
| scipy/scipy | scipy/sparse/linalg/eigen.py | Python | bsd-3-clause | 1,151 |
"""Implements a widget that displays the player's current commitments."""
| yongwen/makahiki | makahiki/apps/widgets/my_commitments/__init__.py | Python | mit | 74 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2010 VoltDB Inc.
#
# VoltDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VoltDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
testspec = """
class Database {
/** test comment */
// more comments
Partition* partitions; // more comments
Table* tables;
Program* programs;
Procedure* procedures;
}
/*
class Garbage {
Garbage garbage;
}
*/
class Partition {
bool isActive;
Range* ranges;
Replica* replicas;
}
class Table {
int type;
Table? buddy1;
Table? buddy2;
Column* columns;
Index* indexes;
Constraint* constraints;
}
class Program {
Program* programs;
Procedure* procedures;
Table* tables;
}
"""
def checkeq(a, b):
    """Assert-style helper: raise if the two values differ, else do nothing."""
    if a == b:
        return
    raise Exception('test failed: %r != %r' % (a, b))
| apavlo/h-store | src/catgen/catalog_utils/testdata.py | Python | gpl-3.0 | 1,429 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pootle.core.delegate import config
from pootle.core.plugin import getter
from .exceptions import ConfigurationError
from .models import Config
@getter(config)
def config_getter(**kwargs):
    """Resolve a Config lookup for the delegate's ``config`` provider.

    With a sender (optionally narrowed to one of its instances) the queryset
    is scoped to that model/object; with neither, the site-wide config is
    used. ``key`` may be absent (return the queryset), a list/tuple (list
    lookup) or a single key (single lookup).
    """
    sender = kwargs["sender"]
    instance = kwargs.get("instance")
    key = kwargs.get("key")

    if not sender:
        # An instance without a sender is ambiguous — reject it.
        if instance:
            raise ConfigurationError(
                "'sender' must be defined when 'instance' is specified")
        conf = Config.objects.site_config()
    else:
        mismatched = (
            instance is not None and not isinstance(instance, sender))
        if mismatched:
            raise ConfigurationError(
                "'instance' must be an instance of 'sender', when specified")
        conf = Config.objects.get_config_queryset(instance or sender)

    if key is None:
        return conf
    if isinstance(key, (list, tuple)):
        return conf.list_config(key)
    try:
        return conf.get_config(key)
    except Config.MultipleObjectsReturned as e:
        raise ConfigurationError(e)
| claudep/pootle | pootle/apps/pootle_config/getters.py | Python | gpl-3.0 | 1,264 |
#!/usr/bin/env python
#
# Copyright 2005,2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import blocks_swig as blocks
import random
class test_packing(gr_unittest.TestCase):
    """QA for the packed_to_unpacked / unpacked_to_packed conversion blocks.

    Covers byte (bb), short (ss) and int (ii) variants in both bit orders,
    plus unpack->repack round trips over seeded random data.  The heavily
    duplicated flow-graph plumbing of the original tests is factored into
    the private helpers below; every public ``test_*`` method keeps its
    original name, inputs and assertions.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    # ------------------------------------------------------------------
    # helpers (behaviour-preserving refactoring of the repeated plumbing)
    # ------------------------------------------------------------------

    def _run_chain(self, src, ops, dst):
        """Connect src -> ops... -> dst, run the flow graph, return sink data."""
        self.tb.connect(src, *ops)
        self.tb.connect(ops[-1], dst)
        self.tb.run()
        return dst.data()

    def _random_tuple(self, count, lo, hi):
        """Seeded tuple of `count` random integers in [lo, hi]."""
        # seed(0) before each draw keeps the data identical to the
        # original tests' per-test seeding.
        random.seed(0)
        return tuple([random.randint(lo, hi) for i in range(count)])

    def _unpack_bb(self, src_data, expected, bits_per_chunk, endianness):
        """Check packed_to_unpacked_bb output for a fixed byte input."""
        dst = gr.vector_sink_b()
        data = self._run_chain(
            gr.vector_source_b(src_data, False),
            [blocks.packed_to_unpacked_bb(bits_per_chunk, endianness)],
            dst)
        self.assertEqual(expected, data)

    def _pack_bb(self, src_data, expected, bits_per_chunk, endianness):
        """Check unpacked_to_packed_bb output for a fixed symbol input."""
        dst = gr.vector_sink_b()
        data = self._run_chain(
            gr.vector_source_b(src_data, False),
            [blocks.unpacked_to_packed_bb(bits_per_chunk, endianness)],
            dst)
        self.assertEqual(expected, data)

    def _roundtrip_bb(self, count, bits_per_chunk, endianness):
        """Unpack then repack random bytes; output must equal the input.

        As in the original tests, only the first 201 items are compared,
        because a trailing partial chunk may be dropped by the conversion.
        """
        src_data = self._random_tuple(count, 0, 255)
        dst = gr.vector_sink_b()
        data = self._run_chain(
            gr.vector_source_b(src_data, False),
            [blocks.packed_to_unpacked_bb(bits_per_chunk, endianness),
             blocks.unpacked_to_packed_bb(bits_per_chunk, endianness)],
            dst)
        self.assertEqual(src_data[0:201], data)

    def _roundtrip_ss(self, bits_per_chunk, endianness):
        """Unpack/repack 100 random 16-bit shorts; must be lossless."""
        src_data = self._random_tuple(100, -2 ** 15, 2 ** 15 - 1)
        dst = gr.vector_sink_s()
        data = self._run_chain(
            gr.vector_source_s(src_data, False),
            [blocks.packed_to_unpacked_ss(bits_per_chunk, endianness),
             blocks.unpacked_to_packed_ss(bits_per_chunk, endianness)],
            dst)
        self.assertEqual(src_data, data)

    def _roundtrip_ii(self, bits_per_chunk, endianness):
        """Unpack/repack 100 random 32-bit ints; must be lossless."""
        src_data = self._random_tuple(100, -2 ** 31, 2 ** 31 - 1)
        dst = gr.vector_sink_i()
        data = self._run_chain(
            gr.vector_source_i(src_data, False),
            [blocks.packed_to_unpacked_ii(bits_per_chunk, endianness),
             blocks.unpacked_to_packed_ii(bits_per_chunk, endianness)],
            dst)
        self.assertEqual(src_data, data)

    # ------------------------------------------------------------------
    # tests on bytes
    # ------------------------------------------------------------------

    def test_001(self):
        # MSB first: the high bit of 0x80 comes out first.
        self._unpack_bb((0x80,), (1, 0, 0, 0, 0, 0, 0, 0), 1, gr.GR_MSB_FIRST)

    def test_002(self):
        # LSB first: the high bit of 0x80 comes out last.
        self._unpack_bb((0x80,), (0, 0, 0, 0, 0, 0, 0, 1), 1, gr.GR_LSB_FIRST)

    def test_003(self):
        self._unpack_bb((0x11,), (4, 2), 3, gr.GR_LSB_FIRST)

    def test_004(self):
        self._unpack_bb((0x11,), (0, 4), 3, gr.GR_MSB_FIRST)

    def test_005(self):
        self._pack_bb((1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0),
                      (0x82, 0x5a), 1, gr.GR_MSB_FIRST)

    def test_006(self):
        self._pack_bb((0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0),
                      (0x82, 0x5a), 1, gr.GR_LSB_FIRST)

    def test_007(self):
        self._pack_bb((4, 2, 0, 0, 0), (0x11,), 3, gr.GR_LSB_FIRST)

    def test_008(self):
        self._pack_bb((0, 4, 2, 0, 0), (0x11,), 3, gr.GR_MSB_FIRST)

    def test_009(self):
        self._roundtrip_bb(202, 3, gr.GR_MSB_FIRST)

    def test_010(self):
        self._roundtrip_bb(56, 7, gr.GR_MSB_FIRST)

    def test_011(self):
        self._roundtrip_bb(56, 7, gr.GR_LSB_FIRST)

    # tests on shorts

    def test_100a(self):
        self._roundtrip_ss(1, gr.GR_MSB_FIRST)

    def test_100b(self):
        self._roundtrip_ss(1, gr.GR_LSB_FIRST)

    def test_101a(self):
        self._roundtrip_ss(8, gr.GR_MSB_FIRST)

    def test_101b(self):
        self._roundtrip_ss(8, gr.GR_LSB_FIRST)

    # tests on ints

    def test_200a(self):
        self._roundtrip_ii(1, gr.GR_MSB_FIRST)

    def test_200b(self):
        self._roundtrip_ii(1, gr.GR_LSB_FIRST)

    def test_201a(self):
        self._roundtrip_ii(8, gr.GR_MSB_FIRST)

    def test_201b(self):
        self._roundtrip_ii(8, gr.GR_LSB_FIRST)
if __name__ == '__main__':
    # Run through GNU Radio's unittest wrapper, which also emits an XML
    # result file for the build system.
    gr_unittest.run(test_packing, "test_packing.xml")
| gnu-sandhi/sandhi | modules/gr36/gr-blocks/python/qa_packed_to_unpacked.py | Python | gpl-3.0 | 10,891 |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
#
# DISCLAIMER
#
# netaddr is not sponsored nor endorsed by IANA.
#
# Use of data from IANA (Internet Assigned Numbers Authority) is subject to
# copyright and is provided with prior written permission.
#
# IANA data files included with netaddr are not modified in any way but are
# parsed and made available to end users through an API.
#
# See README file and source code for URLs to latest copies of the relevant
# files.
#
#-----------------------------------------------------------------------------
"""
Routines for accessing data published by IANA (Internet Assigned Numbers
Authority).
More details can be found at the following URLs :-
- IANA Home Page - http://www.iana.org/
- IEEE Protocols Information Home Page - http://www.iana.org/protocols/
"""
import os as _os
import os.path as _path
import sys as _sys
import re as _re
from xml.sax import make_parser, handler
from netaddr.core import Publisher, Subscriber, PrettyPrinter, dos2unix
from netaddr.ip import IPAddress, IPNetwork, IPRange, \
cidr_abbrev_to_verbose, iprange_to_cidrs
from netaddr.compat import _dict_items, _callable
#-----------------------------------------------------------------------------
#: Topic based lookup dictionary for IANA information.
IANA_INFO = {
'IPv4' : {},
'IPv6' : {},
'multicast' : {},
}
#-----------------------------------------------------------------------------
class SaxRecordParser(handler.ContentHandler):
    """SAX handler that extracts <record> elements from IANA XML files.

    Each completed record is converted to a plain dict and handed to the
    ``callback`` supplied at construction time.
    """
    def __init__(self, callback=None):
        self._level = 0            # current element nesting depth
        self._is_active = False    # True while inside a <record> element
        self._record = None        # dict being built for the current record
        self._tag_level = None     # depth at which the current <record> started
        self._tag_payload = None   # character data collected for a child tag
        self._tag_feeding = None   # True while collecting character data
        self._callback = callback  # invoked with each completed record dict

    def startElement(self, name, attrs):
        self._level += 1
        if self._is_active is False:
            if name == 'record':
                # Entering a record: remember its depth so its direct
                # children can be recognised below.
                self._is_active = True
                self._tag_level = self._level
                self._record = {}
                if 'date' in attrs:
                    self._record['date'] = attrs['date']
        elif self._level == self._tag_level + 1:
            # Direct child of the active record element.
            if name == 'xref':
                # Cross-references carry their data in attributes; several
                # of the same type may occur, hence the list.
                if 'type' in attrs and 'data' in attrs:
                    l = self._record.setdefault(attrs['type'], [])
                    l.append(attrs['data'])
            else:
                # Simple tag: start collecting its character data.
                self._tag_payload = []
                self._tag_feeding = True
        else:
            self._tag_feeding = False

    def endElement(self, name):
        if self._is_active is True:
            if name == 'record' and self._tag_level == self._level:
                # Record complete: hand it to the callback and reset state.
                self._is_active = False
                self._tag_level = None
                if _callable(self._callback):
                    self._callback(self._record)
                self._record = None
            elif self._level == self._tag_level + 1:
                if name != 'xref':
                    # Store the accumulated character data under the tag name.
                    self._record[name] = ''.join(self._tag_payload)
                    self._tag_payload = None
                    self._tag_feeding = False
        self._level -= 1

    def characters(self, content):
        # May be called several times per tag; accumulate fragments.
        if self._tag_feeding is True:
            self._tag_payload.append(content)
class XMLRecordParser(Publisher):
    """
    A configurable Parser that understands how to parse XML based records.
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to XML based record data.
        """
        super(XMLRecordParser, self).__init__()
        # Wire the SAX machinery up to this parser's consume_record hook.
        self.xmlparser = make_parser()
        self.xmlparser.setContentHandler(SaxRecordParser(self.consume_record))
        self.fh = fh
        # Allow callers to attach arbitrary parser options as attributes.
        self.__dict__.update(kwargs)

    def process_record(self, rec):
        """
        This is the callback method invoked for every record. It is usually
        over-ridden by base classes to provide specific record-based logic.

        Any record can be vetoed (not passed to registered Subscriber objects)
        by simply returning None.
        """
        return rec

    def consume_record(self, rec):
        # Give subclasses a chance to normalise or veto before notifying
        # registered subscribers.
        record = self.process_record(rec)
        if record is not None:
            self.notify(record)

    def parse(self):
        """
        Parse and normalises records, notifying registered subscribers with
        record data as it is encountered.
        """
        self.xmlparser.parse(self.fh)
#-----------------------------------------------------------------------------
class IPv4Parser(XMLRecordParser):
    """
    A XMLRecordParser that understands how to parse and retrieve data records
    from the IANA IPv4 address space file.

    It can be found online here :-

    - http://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xml
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv4 address space file.

        kwargs - additional parser options.
        """
        super(IPv4Parser, self).__init__(fh)

    def process_record(self, rec):
        """
        Callback method invoked for every record.

        See base class method for more details.
        """
        wanted = ('prefix', 'designation', 'date', 'whois', 'status')
        record = dict((field, str(rec.get(field, '')).strip())
                      for field in wanted)

        # Strip leading zeros from octet.
        if '/' in record['prefix']:
            (octet, prefix) = record['prefix'].split('/')
            record['prefix'] = '%d/%d' % (int(octet), int(prefix))

        record['status'] = record['status'].capitalize()
        return record
#-----------------------------------------------------------------------------
class IPv6Parser(XMLRecordParser):
    """
    A XMLRecordParser that understands how to parse and retrieve data records
    from the IANA IPv6 address space file.

    It can be found online here :-

    - http://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xml
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv6 address space file.

        kwargs - additional parser options.
        """
        super(IPv6Parser, self).__init__(fh)

    def process_record(self, rec):
        """
        Callback method invoked for every record.

        See base class method for more details.
        """
        prefix = str(rec.get('prefix', '')).strip()
        allocation = str(rec.get('description', '')).strip()
        # Only the first RFC reference is kept.
        reference = str(rec.get('rfc', [''])[0]).strip()
        return {
            'prefix': prefix,
            'allocation': allocation,
            'reference': reference,
        }
#-----------------------------------------------------------------------------
class MulticastParser(XMLRecordParser):
    """
    A XMLRecordParser that knows how to process the IANA IPv4 multicast address
    allocation file.

    It can be found online here :-

    - http://www.iana.org/assignments/multicast-addresses/multicast-addresses.xml
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv4 multicast address
        allocation file.

        kwargs - additional parser options.
        """
        super(MulticastParser, self).__init__(fh)

    def normalise_addr(self, addr):
        """
        Removes variations from address entries found in this particular file,
        i.e. strips leading zeros from every octet of an address or range.
        """
        def _clean(part):
            return '.'.join(
                [str(int(octet)) for octet in part.strip().split('.')])

        if '-' not in addr:
            return _clean(addr)
        (first, last) = addr.split('-')
        return '%s-%s' % (_clean(first), _clean(last))

    def process_record(self, rec):
        """
        Callback method invoked for every record.

        See base class method for more details.
        """
        # Records without an address are vetoed (None return).
        if 'addr' not in rec:
            return None
        return {
            'address': self.normalise_addr(str(rec['addr'])),
            'descr': str(rec.get('description', '')),
        }
#-----------------------------------------------------------------------------
class DictUpdater(Subscriber):
    """
    Concrete Subscriber that inserts records received from a Publisher into a
    dictionary.
    """
    def __init__(self, dct, topic, unique_key):
        """
        Constructor.

        dct - lookup dict or dict like object to insert records into.

        topic - high-level category name of data to be processed.

        unique_key - key name in data dict that uniquely identifies it.
        """
        self.dct = dct
        self.topic = topic
        self.unique_key = unique_key

    def update(self, data):
        """
        Callback function used by Publisher to notify this Subscriber about
        an update. Stores topic based information into dictionary passed to
        constructor.
        """
        data_id = data[self.unique_key]

        if self.topic == 'IPv4':
            # Keys are IPNetwork objects built from (possibly abbreviated)
            # CIDR prefixes.
            cidr = IPNetwork(cidr_abbrev_to_verbose(data_id))
            self.dct[cidr] = data
        elif self.topic == 'IPv6':
            cidr = IPNetwork(cidr_abbrev_to_verbose(data_id))
            self.dct[cidr] = data
        elif self.topic == 'multicast':
            iprange = None
            if '-' in data_id:
                # See if we can manage a single CIDR.
                (first, last) = data_id.split('-')
                iprange = IPRange(first, last)
                cidrs = iprange.cidrs()
                if len(cidrs) == 1:
                    iprange = cidrs[0]
            else:
                # Single address entry.
                iprange = IPAddress(data_id)
            self.dct[iprange] = data
#-----------------------------------------------------------------------------
def load_info():
    """
    Parse and load internal IANA data lookups with the latest information from
    data files.

    Fills the module-level IANA_INFO dict via DictUpdater subscribers.
    """
    PATH = _path.dirname(__file__)

    # Use context managers so the data files are closed deterministically
    # (the original code leaked the file handles).
    with open(_path.join(PATH, 'ipv4-address-space.xml')) as fh:
        ipv4 = IPv4Parser(fh)
        ipv4.attach(DictUpdater(IANA_INFO['IPv4'], 'IPv4', 'prefix'))
        ipv4.parse()

    with open(_path.join(PATH, 'ipv6-address-space.xml')) as fh:
        ipv6 = IPv6Parser(fh)
        ipv6.attach(DictUpdater(IANA_INFO['IPv6'], 'IPv6', 'prefix'))
        ipv6.parse()

    with open(_path.join(PATH, 'multicast-addresses.xml')) as fh:
        mcast = MulticastParser(fh)
        mcast.attach(DictUpdater(IANA_INFO['multicast'], 'multicast',
                                 'address'))
        mcast.parse()
#-----------------------------------------------------------------------------
def pprint_info(fh=None):
    """
    Pretty prints IANA information to filehandle.

    fh - writable file-like object; defaults to sys.stdout.
    """
    if fh is None:
        fh = _sys.stdout

    for category in sorted(IANA_INFO):
        fh.write('-' * len(category) + "\n")
        fh.write(category + "\n")
        fh.write('-' * len(category) + "\n")
        ipranges = IANA_INFO[category]
        for iprange in sorted(ipranges):
            details = ipranges[iprange]
            # BUG FIX: `details` is a dict, so the original
            # `'%-45r' % (iprange) + details + "\n"` raised TypeError
            # (cannot concatenate str and dict). Format it via %s instead.
            fh.write('%-45r %s\n' % (iprange, details))
#-----------------------------------------------------------------------------
def query(ip_addr):
    """
    Returns informational data specific to this IP address.

    The result maps section names ('IPv4', 'IPv6', 'Multicast') to lists of
    matching IANA record dicts; sections with no matches are omitted.
    """
    info = {}

    def within_bounds(ip, ip_range):
        # Boundary checking for multiple IP classes.
        if hasattr(ip_range, 'first'):
            # IP network or IP range.
            return ip in ip_range
        elif hasattr(ip_range, 'value'):
            # IP address.
            return ip == ip_range

        raise Exception('Unsupported IP range or address: %r!' % ip_range)

    if ip_addr.version == 4:
        for cidr, record in _dict_items(IANA_INFO['IPv4']):
            if within_bounds(ip_addr, cidr):
                info.setdefault('IPv4', [])
                info['IPv4'].append(record)

        # Multicast addresses get an additional section of their own.
        if ip_addr.is_multicast():
            for iprange, record in _dict_items(IANA_INFO['multicast']):
                if within_bounds(ip_addr, iprange):
                    info.setdefault('Multicast', [])
                    info['Multicast'].append(record)

    elif ip_addr.version == 6:
        for cidr, record in _dict_items(IANA_INFO['IPv6']):
            if within_bounds(ip_addr, cidr):
                info.setdefault('IPv6', [])
                info['IPv6'].append(record)

    return info
#-----------------------------------------------------------------------------
def get_latest_files():
    """Download the latest files from IANA and save them alongside this
    module, normalising line endings afterwards."""
    if _sys.version_info[0] == 3:
        # Python 3.x
        from urllib.request import Request, urlopen
    else:
        # Python 2.x
        from urllib2 import Request, urlopen

    urls = [
        'http://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xml',
        'http://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xml',
        'http://www.iana.org/assignments/multicast-addresses/multicast-addresses.xml',
    ]

    for url in urls:
        _sys.stdout.write('downloading latest copy of %s\n' % url)
        request = Request(url)
        response = urlopen(request)
        try:
            save_path = _path.dirname(__file__)
            basename = _os.path.basename(response.geturl().rstrip('/'))
            filename = _path.join(save_path, basename)
            # `with` guarantees the output file is closed even if the body
            # read fails partway (original code leaked fh on error).
            with open(filename, 'wb') as fh:
                fh.write(response.read())
        finally:
            # The HTTP response was never closed in the original code.
            response.close()

        # Make sure the line endings are consistent across platforms.
        dos2unix(filename)
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    # Generate indices when module is executed as a script.
    get_latest_files()

# On module import, read IANA data files and populate lookups dict.
# NOTE: this runs unconditionally, so importing this module parses the
# three XML data files up front.
load_info()
| ecolitan/fatics | venv/lib/python2.7/site-packages/netaddr/ip/iana.py | Python | agpl-3.0 | 14,149 |
from gi.repository import Gtk
"""
Since GTK+3 Gtk.CellRenderer doesn't have a destroy signal anymore.
We can do the cleanup in the python destructor method instead.
"""
class MyCellRenderer(Gtk.CellRenderer):
    """Minimal cell renderer used to demonstrate Python-side cleanup."""
    def __init__(self):
        Gtk.CellRenderer.__init__(self)
    def __del__(self):
        # GTK+3 removed the "destroy" signal from Gtk.CellRenderer, so the
        # Python destructor is used here to observe the cleanup instead.
        print "cellrenderer destroy"
    def do_render(self, cairo_t, widget, background_area, cell_area, flags):
        # No drawing is needed for this demo renderer.
        pass
def window_destroy_cb(*kwargs):
    # Quit the GTK main loop once the toplevel window goes away.
    print "window destroy"
    Gtk.main_quit()
# Build a window -> treeview -> column -> renderer hierarchy so that
# destroying the window tears everything down in order.
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.connect("destroy", window_destroy_cb)
window.show()

def treeview_destroy_cb(*kwargs):
    # Log tree view destruction for the demo.
    print "treeview destroy"

treeview = Gtk.TreeView()
treeview.connect("destroy", treeview_destroy_cb)
window.add(treeview)
treeview.show()

col = Gtk.TreeViewColumn()
treeview.append_column(col)
cel = MyCellRenderer()
col.pack_start(cel, expand=True)

Gtk.main()
| gusDuarte/sugar-toolkit-gtk3 | tests/graphics/customdestroy.py | Python | lgpl-2.1 | 918 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
import unittest
import os
from tempfile import mkdtemp
import shutil
from swift.common.storage_policy import StoragePolicy
from swift.common.swob import Request
from swift.common.utils import mkdirs, split_path
from swift.common.wsgi import monkey_patch_mimetools, WSGIContext
from swift.obj import server as object_server
from swift.proxy import server as proxy
import swift.proxy.controllers
from test.unit import FakeMemcache, debug_logger, FakeRing, \
fake_http_connect, patch_policies
class FakeServerConnection(WSGIContext):
    '''Fakes an HTTPConnection to a server instance.'''
    def __init__(self, app):
        super(FakeServerConnection, self).__init__(app)
        self.data = ''  # request body accumulated via send()

    def getheaders(self):
        return self._response_headers

    def read(self, amt=None):
        # Drain the response iterator one chunk at a time; '' signals EOF,
        # mirroring httplib's file-like read() behaviour.
        try:
            result = next(self.resp_iter)
            return result
        except StopIteration:
            return ''

    def getheader(self, name, default=None):
        result = self._response_header_value(name)
        return result if result else default

    def getresponse(self):
        # Replay the captured request against the wrapped WSGI app and
        # expose the result through an httplib-response-like interface.
        environ = {'REQUEST_METHOD': self.method}
        req = Request.blank(self.path, environ, headers=self.req_headers,
                            body=self.data)
        self.data = ''
        self.resp = self._app_call(req.environ)
        self.resp_iter = iter(self.resp)
        if self._response_headers is None:
            self._response_headers = []
        status_parts = self._response_status.split(' ', 1)
        self.status = int(status_parts[0])
        self.reason = status_parts[1] if len(status_parts) == 2 else ''
        return self

    def getexpect(self):
        # Always claim 100-continue so uploads proceed immediately.
        class ContinueResponse(object):
            status = 100
        return ContinueResponse()

    def send(self, data):
        self.data += data

    def close(self):
        pass

    def __call__(self, ipaddr, port, device, partition, method, path,
                 headers=None, query_string=None):
        # Called in place of http_connect(): capture the request target and
        # return self as the "connection".
        self.path = quote('/' + device + '/' + str(partition) + path)
        self.method = method
        self.req_headers = headers
        return self
def get_http_connect(account_func, container_func, object_func):
    '''Returns a http_connect function that delegates to
    entity-specific http_connect methods based on request path.
    '''
    def http_connect(ipaddr, port, device, partition, method, path,
                     headers=None, query_string=None):
        # Decide which handler owns the request from how deep the path is:
        # /account[/container[/object]].
        a, c, o = split_path(path, 1, 3, True)
        if o:
            handler = object_func
        elif c:
            handler = container_func
        else:
            handler = account_func
        return handler(ipaddr, port, device, partition, method, path,
                       headers=headers, query_string=query_string)

    return http_connect
@patch_policies([StoragePolicy(0, 'zero', True,
                               object_ring=FakeRing(replicas=1))])
class TestObjectSysmeta(unittest.TestCase):
    '''Tests object sysmeta is correctly handled by combination
    of proxy server and object server.
    '''
    def _assertStatus(self, resp, expected):
        # Helper: assert the response status code equals `expected`.
        self.assertEqual(resp.status_int, expected,
                         'Expected %d, got %s'
                         % (expected, resp.status))
    def _assertInHeaders(self, resp, expected):
        # Helper: assert each key in `expected` is present in the response
        # headers with exactly the expected value.
        for key, val in expected.items():
            self.assertTrue(key in resp.headers,
                            'Header %s missing from %s' % (key, resp.headers))
            self.assertEqual(val, resp.headers[key],
                             'Expected header %s:%s, got %s:%s'
                             % (key, val, key, resp.headers[key]))
    def _assertNotInHeaders(self, resp, unexpected):
        # Helper: assert no key from `unexpected` appears in the headers.
        for key, val in unexpected.items():
            self.assertFalse(key in resp.headers,
                             'Header %s not expected in %s'
                             % (key, resp.headers))
    def setUp(self):
        # Build a proxy app with fake account/container backends and a real
        # ObjectController backed by a temp dir, so sysmeta flows through
        # real proxy and object server code paths.
        self.app = proxy.Application(None, FakeMemcache(),
                                     logger=debug_logger('proxy-ut'),
                                     account_ring=FakeRing(replicas=1),
                                     container_ring=FakeRing(replicas=1))
        monkey_patch_mimetools()
        self.tmpdir = mkdtemp()
        self.testdir = os.path.join(self.tmpdir,
                                    'tmp_test_object_server_ObjectController')
        mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
        conf = {'devices': self.testdir, 'mount_check': 'false'}
        self.obj_ctlr = object_server.ObjectController(
            conf, logger=debug_logger('obj-ut'))
        http_connect = get_http_connect(fake_http_connect(200),
                                        fake_http_connect(200),
                                        FakeServerConnection(self.obj_ctlr))
        # Patch the proxy's http_connect entry points; restored in tearDown.
        self.orig_base_http_connect = swift.proxy.controllers.base.http_connect
        self.orig_obj_http_connect = swift.proxy.controllers.obj.http_connect
        swift.proxy.controllers.base.http_connect = http_connect
        swift.proxy.controllers.obj.http_connect = http_connect
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        swift.proxy.controllers.base.http_connect = self.orig_base_http_connect
        swift.proxy.controllers.obj.http_connect = self.orig_obj_http_connect
    # Header fixtures: x-object-sysmeta-* is system metadata,
    # x-object-meta-* is user metadata, and bad_headers (account sysmeta on
    # an object) must never be persisted.
    original_sysmeta_headers_1 = {'x-object-sysmeta-test0': 'val0',
                                  'x-object-sysmeta-test1': 'val1'}
    original_sysmeta_headers_2 = {'x-object-sysmeta-test2': 'val2'}
    changed_sysmeta_headers = {'x-object-sysmeta-test0': '',
                               'x-object-sysmeta-test1': 'val1 changed'}
    new_sysmeta_headers = {'x-object-sysmeta-test3': 'val3'}
    original_meta_headers_1 = {'x-object-meta-test0': 'meta0',
                               'x-object-meta-test1': 'meta1'}
    original_meta_headers_2 = {'x-object-meta-test2': 'meta2'}
    changed_meta_headers = {'x-object-meta-test0': '',
                            'x-object-meta-test1': 'meta1 changed'}
    new_meta_headers = {'x-object-meta-test3': 'meta3'}
    bad_headers = {'x-account-sysmeta-test1': 'bad1'}
    def test_PUT_sysmeta_then_GET(self):
        # Sysmeta and user meta sent on PUT come back on GET; non-object
        # sysmeta (bad_headers) is dropped.
        path = '/v1/a/c/o'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_meta_headers_1)
        hdrs.update(self.bad_headers)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        req = Request.blank(path, environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.original_sysmeta_headers_1)
        self._assertInHeaders(resp, self.original_meta_headers_1)
        self._assertNotInHeaders(resp, self.bad_headers)
    def test_PUT_sysmeta_then_HEAD(self):
        # Same as the GET variant, but verifies headers on a HEAD response.
        path = '/v1/a/c/o'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_meta_headers_1)
        hdrs.update(self.bad_headers)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        env = {'REQUEST_METHOD': 'HEAD'}
        req = Request.blank(path, environ=env)
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.original_sysmeta_headers_1)
        self._assertInHeaders(resp, self.original_meta_headers_1)
        self._assertNotInHeaders(resp, self.bad_headers)
    def test_sysmeta_replaced_by_PUT(self):
        # A second PUT completely replaces both sysmeta and user meta.
        path = '/v1/a/c/o'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_sysmeta_headers_2)
        hdrs.update(self.original_meta_headers_1)
        hdrs.update(self.original_meta_headers_2)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.changed_sysmeta_headers)
        hdrs.update(self.new_sysmeta_headers)
        hdrs.update(self.changed_meta_headers)
        hdrs.update(self.new_meta_headers)
        hdrs.update(self.bad_headers)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        req = Request.blank(path, environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertNotInHeaders(resp, self.original_meta_headers_2)
    def _test_sysmeta_not_updated_by_POST(self):
        # check sysmeta is not changed by a POST but user meta is replaced
        path = '/v1/a/c/o'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_meta_headers_1)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        env = {'REQUEST_METHOD': 'POST'}
        hdrs = dict(self.changed_sysmeta_headers)
        hdrs.update(self.new_sysmeta_headers)
        hdrs.update(self.changed_meta_headers)
        hdrs.update(self.new_meta_headers)
        hdrs.update(self.bad_headers)
        req = Request.blank(path, environ=env, headers=hdrs)
        resp = req.get_response(self.app)
        self._assertStatus(resp, 202)
        req = Request.blank(path, environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        # POST left sysmeta untouched but replaced the user meta.
        self._assertInHeaders(resp, self.original_sysmeta_headers_1)
        self._assertNotInHeaders(resp, self.new_sysmeta_headers)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertNotInHeaders(resp, self.bad_headers)
        # A subsequent PUT still replaces sysmeta as usual.
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.changed_sysmeta_headers)
        hdrs.update(self.new_sysmeta_headers)
        hdrs.update(self.bad_headers)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        req = Request.blank(path, environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
    def test_sysmeta_not_updated_by_POST(self):
        # Fast-POST path: POST goes straight to the object server.
        self.app.object_post_as_copy = False
        self._test_sysmeta_not_updated_by_POST()
    def test_sysmeta_not_updated_by_POST_as_copy(self):
        # POST-as-COPY path: POST is implemented as a server-side copy.
        self.app.object_post_as_copy = True
        self._test_sysmeta_not_updated_by_POST()
    def test_sysmeta_updated_by_COPY(self):
        # check sysmeta is updated by a COPY in same way as user meta
        path = '/v1/a/c/o'
        dest = '/c/o2'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_sysmeta_headers_2)
        hdrs.update(self.original_meta_headers_1)
        hdrs.update(self.original_meta_headers_2)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        env = {'REQUEST_METHOD': 'COPY'}
        hdrs = dict(self.changed_sysmeta_headers)
        hdrs.update(self.new_sysmeta_headers)
        hdrs.update(self.changed_meta_headers)
        hdrs.update(self.new_meta_headers)
        hdrs.update(self.bad_headers)
        hdrs.update({'Destination': dest})
        req = Request.blank(path, environ=env, headers=hdrs)
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertInHeaders(resp, self.original_sysmeta_headers_2)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertInHeaders(resp, self.original_meta_headers_2)
        self._assertNotInHeaders(resp, self.bad_headers)
        req = Request.blank('/v1/a/c/o2', environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertInHeaders(resp, self.original_sysmeta_headers_2)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertInHeaders(resp, self.original_meta_headers_2)
        self._assertNotInHeaders(resp, self.bad_headers)
    def test_sysmeta_updated_by_COPY_from(self):
        # check sysmeta is updated by a COPY in same way as user meta
        path = '/v1/a/c/o'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.original_sysmeta_headers_1)
        hdrs.update(self.original_sysmeta_headers_2)
        hdrs.update(self.original_meta_headers_1)
        hdrs.update(self.original_meta_headers_2)
        req = Request.blank(path, environ=env, headers=hdrs, body='x')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        # PUT with X-Copy-From is the pull-style spelling of COPY.
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = dict(self.changed_sysmeta_headers)
        hdrs.update(self.new_sysmeta_headers)
        hdrs.update(self.changed_meta_headers)
        hdrs.update(self.new_meta_headers)
        hdrs.update(self.bad_headers)
        hdrs.update({'X-Copy-From': '/c/o'})
        req = Request.blank('/v1/a/c/o2', environ=env, headers=hdrs, body='')
        resp = req.get_response(self.app)
        self._assertStatus(resp, 201)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertInHeaders(resp, self.original_sysmeta_headers_2)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertInHeaders(resp, self.original_meta_headers_2)
        self._assertNotInHeaders(resp, self.bad_headers)
        req = Request.blank('/v1/a/c/o2', environ={})
        resp = req.get_response(self.app)
        self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.changed_sysmeta_headers)
        self._assertInHeaders(resp, self.new_sysmeta_headers)
        self._assertInHeaders(resp, self.original_sysmeta_headers_2)
        self._assertInHeaders(resp, self.changed_meta_headers)
        self._assertInHeaders(resp, self.new_meta_headers)
        self._assertInHeaders(resp, self.original_meta_headers_2)
        self._assertNotInHeaders(resp, self.bad_headers)
| bkolli/swift | test/unit/proxy/test_sysmeta.py | Python | apache-2.0 | 16,039 |
# NOTE: Python 2 script (uses print statements).
import h2o, h2o_config
# Load the test configuration from JSON; returns the list of test config
# objects and also populates h2o_config.configs.
l = h2o_config.setup_test_config(test_config_json='test_config.json')
print "\nsetup_test_config returns list of test config objs:", l
# Here are some ways to reference the config state that the json created
print "\nHow to reference.."
# Each config object exposes the json keys as attributes.
for i, obj in enumerate(h2o_config.configs):
    print "keys in config", i, ":", obj.__dict__.keys()
print h2o_config.configs[0].trees
for t in h2o_config.configs:
    print "\nTest config_name:", t.config_name
    print "trees:", t.trees
    print "params:", t.params
    print "params['timeoutSecs']:", t.params['timeoutSecs']
| 111t8e/h2o-2 | py/test_config_basic.py | Python | apache-2.0 | 597 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for representing a contact with multiple phone numbers.
This module provides models with a relationship with ndb.KeyProperty to
allow a single contact to have multiple phone numbers.
For more information, see README.md.
"""
# In the original article, it uses ReferenceProperty on the
# PhoneNumber model. With ndb, there is no ReferenceProperty any more,
# so here we use KeyProperty first. However this pattern has a
# consistency issue, shown in the test_fails function in
# test/test_keyproperty_models.py.
from google.appengine.ext import ndb
# [START keyproperty_models]
class Contact(ndb.Model):
    """A Contact model with KeyProperty."""
    # Basic info.
    name = ndb.StringProperty()
    birth_day = ndb.DateProperty()
    # Address info.
    address = ndb.StringProperty()
    # Company info.
    company_title = ndb.StringProperty()
    company_name = ndb.StringProperty()
    company_description = ndb.TextProperty()
    company_address = ndb.StringProperty()
    # The original phone_number property has been replaced by
    # the following property.
    @property
    def phone_numbers(self):
        """Return a query for the PhoneNumber entities linked to this contact.

        PhoneNumber is defined later in this module; that is fine because
        this body only executes after the module has fully loaded.
        """
        return PhoneNumber.query(PhoneNumber.contact == self.key)
class PhoneNumber(ndb.Model):
    """A model representing a phone number."""
    # Key of the owning Contact entity (one contact -> many numbers).
    contact = ndb.KeyProperty(Contact)
    # Kind of number; restricted to a fixed choice set.
    phone_type = ndb.StringProperty(
        choices=('home', 'work', 'fax', 'mobile', 'other'))
    number = ndb.StringProperty()
# [END keyproperty_models]
| clarko1/Cramd | appengine/standard/ndb/modeling/keyproperty_models.py | Python | apache-2.0 | 2,072 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import exception
from nova import objects
from nova import test
class TestImageMeta(test.NoDBTestCase):
    def test_basic_attrs(self):
        # Round-trip of the primary Glance image fields, including the
        # string -> int and string -> bool coercions done by from_dict().
        image = {'status': 'active',
                 'container_format': 'bare',
                 'min_ram': 0,
                 'updated_at': '2014-12-12T11:16:36.000000',
                 # Testing string -> int conversion
                 'min_disk': '0',
                 'owner': '2d8b9502858c406ebee60f0849486222',
                 # Testing string -> bool conversion
                 'protected': 'yes',
                 'properties': {
                     'os_type': 'Linux',
                     'hw_video_model': 'vga',
                     'hw_video_ram': '512',
                     'hw_qemu_guest_agent': 'yes',
                     'hw_scsi_model': 'virtio-scsi',
                 },
                 'size': 213581824,
                 'name': 'f16-x86_64-openstack-sda',
                 'checksum': '755122332caeb9f661d5c978adb8b45f',
                 'created_at': '2014-12-10T16:23:14.000000',
                 'disk_format': 'qcow2',
                 'id': 'c8b1790e-a07d-4971-b137-44f2432936cd'
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('active', image_meta.status)
        self.assertEqual('bare', image_meta.container_format)
        self.assertEqual(0, image_meta.min_ram)
        self.assertIsInstance(image_meta.updated_at, datetime.datetime)
        self.assertEqual(0, image_meta.min_disk)
        self.assertEqual('2d8b9502858c406ebee60f0849486222', image_meta.owner)
        self.assertTrue(image_meta.protected)
        self.assertEqual(213581824, image_meta.size)
        self.assertEqual('f16-x86_64-openstack-sda', image_meta.name)
        self.assertEqual('755122332caeb9f661d5c978adb8b45f',
                         image_meta.checksum)
        self.assertIsInstance(image_meta.created_at, datetime.datetime)
        self.assertEqual('qcow2', image_meta.disk_format)
        self.assertEqual('c8b1790e-a07d-4971-b137-44f2432936cd', image_meta.id)
        self.assertIsInstance(image_meta.properties, objects.ImageMetaProps)
    def test_no_props(self):
        # An empty dict still yields an ImageMetaProps properties object.
        image_meta = objects.ImageMeta.from_dict({})
        self.assertIsInstance(image_meta.properties, objects.ImageMetaProps)
    def test_volume_backed_image(self):
        # Volume-backed images have None for these fields; from_dict()
        # substitutes empty-string/zero defaults.
        image = {'container_format': None,
                 'size': 0,
                 'checksum': None,
                 'disk_format': None,
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('', image_meta.container_format)
        self.assertEqual(0, image_meta.size)
        self.assertEqual('', image_meta.checksum)
        self.assertEqual('', image_meta.disk_format)
    def test_null_substitution(self):
        # None values are replaced with type-appropriate empty defaults.
        image = {'name': None,
                 'checksum': None,
                 'owner': None,
                 'size': None,
                 'virtual_size': None,
                 'container_format': None,
                 'disk_format': None,
                 }
        image_meta = objects.ImageMeta.from_dict(image)
        self.assertEqual('', image_meta.name)
        self.assertEqual('', image_meta.checksum)
        self.assertEqual('', image_meta.owner)
        self.assertEqual(0, image_meta.size)
        self.assertEqual(0, image_meta.virtual_size)
        self.assertEqual('', image_meta.container_format)
        self.assertEqual('', image_meta.disk_format)
class TestImageMetaProps(test.NoDBTestCase):
    def test_normal_props(self):
        # Current-style property names are coerced to their field types.
        props = {'os_type': 'windows',
                 'hw_video_model': 'vga',
                 'hw_video_ram': '512',
                 'hw_qemu_guest_agent': 'yes',
                 # Fill sane values for the rest here
                 }
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual('windows', virtprops.os_type)
        self.assertEqual('vga', virtprops.hw_video_model)
        self.assertEqual(512, virtprops.hw_video_ram)
        self.assertTrue(virtprops.hw_qemu_guest_agent)
    def test_default_props(self):
        # With no input, every field reads back as None via get().
        props = {}
        virtprops = objects.ImageMetaProps.from_dict(props)
        for prop in virtprops.fields:
            self.assertIsNone(virtprops.get(prop))
    def test_default_prop_value(self):
        # get() returns the supplied default for unset (but valid) fields.
        props = {}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual("hvm", virtprops.get("hw_vm_mode", "hvm"))
    def test_non_existent_prop(self):
        # get() on a name that is not a field raises AttributeError.
        props = {}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertRaises(AttributeError,
                          virtprops.get,
                          "doesnotexist")
    def test_legacy_compat(self):
        # Legacy Glance property names map onto the modern fields.
        legacy_props = {
            'architecture': 'x86_64',
            'owner_id': '123',
            'vmware_adaptertype': 'lsiLogic',
            'vmware_disktype': 'preallocated',
            'vmware_image_version': '2',
            'vmware_ostype': 'rhel3_64Guest',
            'auto_disk_config': 'yes',
            'ipxe_boot': 'yes',
            'xenapi_device_id': '3',
            'xenapi_image_compression_level': '2',
            'vmware_linked_clone': 'false',
            'xenapi_use_agent': 'yes',
            'xenapi_skip_agent_inject_ssh': 'no',
            'xenapi_skip_agent_inject_files_at_boot': 'no',
            'cache_in_nova': 'yes',
            'vm_mode': 'hvm',
            'bittorrent': 'yes',
            'mappings': [],
            'block_device_mapping': [],
            'bdm_v2': 'yes',
            'root_device_name': '/dev/vda',
            'hypervisor_version_requires': '>=1.5.3',
            'hypervisor_type': 'qemu',
        }
        image_meta = objects.ImageMetaProps.from_dict(legacy_props)
        self.assertEqual('x86_64', image_meta.hw_architecture)
        self.assertEqual('123', image_meta.img_owner_id)
        self.assertEqual('lsilogic', image_meta.hw_scsi_model)
        self.assertEqual('preallocated', image_meta.hw_disk_type)
        self.assertEqual(2, image_meta.img_version)
        self.assertEqual('rhel3_64Guest', image_meta.os_distro)
        self.assertTrue(image_meta.hw_auto_disk_config)
        self.assertTrue(image_meta.hw_ipxe_boot)
        self.assertEqual(3, image_meta.hw_device_id)
        self.assertEqual(2, image_meta.img_compression_level)
        self.assertFalse(image_meta.img_linked_clone)
        self.assertTrue(image_meta.img_use_agent)
        self.assertFalse(image_meta.os_skip_agent_inject_ssh)
        self.assertFalse(image_meta.os_skip_agent_inject_files_at_boot)
        self.assertTrue(image_meta.img_cache_in_nova)
        self.assertTrue(image_meta.img_bittorrent)
        self.assertEqual([], image_meta.img_mappings)
        self.assertEqual([], image_meta.img_block_device_mapping)
        self.assertTrue(image_meta.img_bdm_v2)
        self.assertEqual("/dev/vda", image_meta.img_root_device_name)
        self.assertEqual('>=1.5.3', image_meta.img_hv_requested_version)
        self.assertEqual('qemu', image_meta.img_hv_type)
    def test_legacy_compat_vmware_adapter_types(self):
        # Each legacy vmware_adaptertype maps to a disk bus and, for SCSI
        # adapters, a specific hw_scsi_model; empty/None set neither.
        legacy_types = ['lsiLogic', 'busLogic', 'ide', 'lsiLogicsas',
                        'paraVirtual', None, '']
        for legacy_type in legacy_types:
            legacy_props = {
                'vmware_adaptertype': legacy_type,
            }
            image_meta = objects.ImageMetaProps.from_dict(legacy_props)
            if legacy_type == 'ide':
                self.assertEqual('ide', image_meta.hw_disk_bus)
            elif not legacy_type:
                self.assertFalse(image_meta.obj_attr_is_set('hw_disk_bus'))
                self.assertFalse(image_meta.obj_attr_is_set('hw_scsi_model'))
            else:
                self.assertEqual('scsi', image_meta.hw_disk_bus)
                if legacy_type == 'lsiLogicsas':
                    expected = 'lsisas1068'
                elif legacy_type == 'paraVirtual':
                    expected = 'vmpvscsi'
                else:
                    expected = legacy_type.lower()
                self.assertEqual(expected, image_meta.hw_scsi_model)
    def test_duplicate_legacy_and_normal_props(self):
        # Both keys are referring to the same object field
        props = {'hw_scsi_model': 'virtio-scsi',
                 'vmware_adaptertype': 'lsiLogic',
                 }
        virtprops = objects.ImageMetaProps.from_dict(props)
        # The normal property always wins vs. the legacy field since
        # _set_attr_from_current_names is called finally
        self.assertEqual('virtio-scsi', virtprops.hw_scsi_model)
    def test_get(self):
        # get() mirrors dict.get(): value if set, default (or None) if not.
        props = objects.ImageMetaProps(os_distro='linux')
        self.assertEqual('linux', props.get('os_distro'))
        self.assertIsNone(props.get('img_version'))
        self.assertEqual(1, props.get('img_version', 1))
    def test_set_numa_mem(self):
        # hw_numa_mem.N keys are gathered into an ordered list of ints.
        props = {'hw_numa_nodes': 2,
                 'hw_numa_mem.0': "2048",
                 'hw_numa_mem.1': "4096"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual(2, virtprops.hw_numa_nodes)
        self.assertEqual([2048, 4096], virtprops.hw_numa_mem)
    def test_set_numa_mem_sparse(self):
        # Entries beyond hw_numa_nodes (or after a gap) are dropped.
        props = {'hw_numa_nodes': 2,
                 'hw_numa_mem.0': "2048",
                 'hw_numa_mem.1': "1024",
                 'hw_numa_mem.3': "4096"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual(2, virtprops.hw_numa_nodes)
        self.assertEqual([2048, 1024], virtprops.hw_numa_mem)
    def test_set_numa_mem_no_count(self):
        # Without hw_numa_nodes only the contiguous prefix is kept.
        props = {'hw_numa_mem.0': "2048",
                 'hw_numa_mem.3': "4096"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertIsNone(virtprops.get("hw_numa_nodes"))
        self.assertEqual([2048], virtprops.hw_numa_mem)
    def test_set_numa_cpus(self):
        # hw_numa_cpus.N range strings are parsed into sets of CPU ids.
        props = {'hw_numa_nodes': 2,
                 'hw_numa_cpus.0': "0-3",
                 'hw_numa_cpus.1': "4-7"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual(2, virtprops.hw_numa_nodes)
        self.assertEqual([set([0, 1, 2, 3]), set([4, 5, 6, 7])],
                         virtprops.hw_numa_cpus)
    def test_set_numa_cpus_sparse(self):
        # A gap in the node indices truncates the parsed list.
        props = {'hw_numa_nodes': 4,
                 'hw_numa_cpus.0': "0-3",
                 'hw_numa_cpus.1': "4,5",
                 'hw_numa_cpus.3': "6-7"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertEqual(4, virtprops.hw_numa_nodes)
        self.assertEqual([set([0, 1, 2, 3]), set([4, 5])],
                         virtprops.hw_numa_cpus)
    def test_set_numa_cpus_no_count(self):
        # Without hw_numa_nodes only the contiguous prefix is kept.
        props = {'hw_numa_cpus.0': "0-3",
                 'hw_numa_cpus.3': "4-7"}
        virtprops = objects.ImageMetaProps.from_dict(props)
        self.assertIsNone(virtprops.get("hw_numa_nodes"))
        self.assertEqual([set([0, 1, 2, 3])],
                         virtprops.hw_numa_cpus)
    def test_obj_make_compatible(self):
        # Backlevelling to version 1.0 drops post-1.0 fields, and raises
        # for enum values 1.0 cannot represent (lxc/uml disk bus).
        props = {
            'img_config_drive': 'mandatory',
            'os_admin_user': 'root',
            'hw_vif_multiqueue_enabled': True,
            'img_hv_type': 'kvm',
            'img_hv_requested_version': '>= 1.0',
            'os_require_quiesce': True,
        }
        obj = objects.ImageMetaProps(**props)
        primitive = obj.obj_to_primitive('1.0')
        self.assertFalse(any([x in primitive['nova_object.data']
                              for x in props]))
        for bus in ('lxc', 'uml'):
            obj.hw_disk_bus = bus
            self.assertRaises(exception.ObjectActionError,
                              obj.obj_to_primitive, '1.0')
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibClassify ontology reader.
The ontology reader reads currently either a RDF/SKOS taxonomy or a
simple controlled vocabulary file (1 word per line). The first role of
this module is to manage the cached version of the ontology file. The
second role is to hold all methods responsible for the creation of
regular expressions. These methods are grammatically related as we take
care of different forms of the same words. The grammatical rules can be
configured via the configuration file.
The main method from this module is get_regular_expressions.
"""
from __future__ import print_function
from datetime import datetime, timedelta
from six import iteritems
from six.moves import cPickle
import os
import re
import sys
import tempfile
import thread
import threading
import time
import traceback
import urllib2
import xml.sax
import rdflib
from invenio.legacy.bibclassify import config as bconfig
from invenio.modules.classifier.errors import TaxonomyError
log = bconfig.get_logger("bibclassify.ontology_reader")
from invenio import config
from invenio.modules.classifier.registry import taxonomies
# only if not running in a stanalone mode
if bconfig.STANDALONE:
dbquery = None
from urllib2 import urlopen
else:
from invenio.legacy import dbquery
from invenio.utils.url import make_invenio_opener
urlopen = make_invenio_opener('BibClassify').open
_contains_digit = re.compile("\d")
_starts_with_non = re.compile("(?i)^non[a-z]")
_starts_with_anti = re.compile("(?i)^anti[a-z]")
_split_by_punctuation = re.compile("(\W+)")
_CACHE = {}
def get_cache(taxonomy_id):
    """Return thread-safe cache for the given taxonomy id.

    :param taxonomy_id: identifier of the taxonomy
    :type taxonomy_id: str
    :return: dictionary object (empty if no taxonomy_id
        is found), you must not change anything inside it.
        Create a new dictionary and use set_cache if you want
        to update the cache!
    """
    # Because of a standalone mode, we don't use the
    # invenio.data_cacher.DataCacher, but it has no effect
    # on proper functionality.
    if taxonomy_id in _CACHE:
        ctime, taxonomy = _CACHE[taxonomy_id]
        # check it is fresh version
        onto_name, onto_path, onto_url = _get_ontology(taxonomy_id)
        cache_path = _get_cache_path(onto_name)
        # if source exists and is newer than the cache held in memory
        if os.path.isfile(onto_path) and os.path.getmtime(onto_path) > ctime:
            # BUGFIX: this message and the one below were swapped; this
            # branch fires when the *source* taxonomy file was updated.
            log.info('Forcing taxonomy rebuild as source'
                     ' file is newer/updated.')
            return {}  # force cache rebuild
        # if the on-disk cache is newer than the cache held in memory
        if os.path.isfile(cache_path) and os.path.getmtime(cache_path) > ctime:
            log.info('Forcing taxonomy rebuild as cached'
                     ' version is newer/updated.')
            return {}
        log.info('Taxonomy retrieved from cache')
        return taxonomy
    return {}
# Single lock shared by all writers to _CACHE.  The previous implementation
# allocated a brand-new lock on every call, which meant each caller locked
# its own private lock and no mutual exclusion actually happened.
_CACHE_LOCK = threading.Lock()


def set_cache(taxonomy_id, contents):
    """Update cache in a thread-safe manner.

    :param taxonomy_id: identifier of the taxonomy (cache key)
    :param contents: taxonomy data stored alongside the current timestamp
    """
    _CACHE_LOCK.acquire()
    try:
        _CACHE[taxonomy_id] = (time.time(), contents)
    finally:
        _CACHE_LOCK.release()
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False):
    """Return a list of patterns compiled from the RDF/SKOS ontology.

    Uses cache if it exists and if the taxonomy hasn't changed.

    :param taxonomy_name: name, path or URL of the taxonomy
    :param rebuild: when True, force regeneration of the cache
    :param no_cache: when True, do not read from nor write to the cache
    :raises TaxonomyError: when neither source nor cache can be used
    """
    # Translate the ontology name into a local path. Check if the name
    # relates to an existing ontology.
    onto_name, onto_path, onto_url = _get_ontology(taxonomy_name)
    if not onto_path:
        raise TaxonomyError("Unable to locate the taxonomy: '%s'."
                            % taxonomy_name)
    cache_path = _get_cache_path(onto_name)
    log.debug('Taxonomy discovered, now we load it '
              '(from cache: %s, onto_path: %s, cache_path: %s)'
              % (not no_cache, onto_path, cache_path))
    if os.access(cache_path, os.R_OK):
        if os.access(onto_path, os.R_OK):
            if rebuild or no_cache:
                log.debug("Cache generation was manually forced.")
                return _build_cache(onto_path, skip_cache=no_cache)
        else:
            # ontology file not found. Use the cache instead.
            log.warning("The ontology couldn't be located. However "
                        "a cached version of it is available. Using it as a "
                        "reference.")
            return _get_cache(cache_path, source_file=onto_path)
        if (os.path.getmtime(cache_path) >
                os.path.getmtime(onto_path)):
            # Cache is more recent than the ontology: use cache.
            # BUGFIX: the message previously said "cache is older than
            # ontology", contradicting the condition above.
            log.debug("Normal situation, cache is newer than ontology,"
                      " so we load it from cache")
            return _get_cache(cache_path, source_file=onto_path)
        else:
            # Ontology is more recent than the cache: rebuild cache.
            log.warning("Cache '%s' is older than '%s'. "
                        "We will rebuild the cache" %
                        (cache_path, onto_path))
            return _build_cache(onto_path, skip_cache=no_cache)
    elif os.access(onto_path, os.R_OK):
        # No readable cache; fail early if an existing cache file cannot
        # be overwritten, otherwise rebuild it from the source.
        if not no_cache and\
           os.path.exists(cache_path) and\
           not os.access(cache_path, os.W_OK):
            raise TaxonomyError('We cannot read/write into: %s. '
                                'Aborting!' % cache_path)
        elif not no_cache and os.path.exists(cache_path):
            log.warning('Cache %s exists, but is not readable!' % cache_path)
        log.info("Cache not available. Building it now: %s" % onto_path)
        return _build_cache(onto_path, skip_cache=no_cache)
    else:
        raise TaxonomyError("We miss both source and cache"
                            " of the taxonomy: %s" % taxonomy_name)
def _get_remote_ontology(onto_url, time_difference=None):
    """Check if the online ontology is more recent than the local ontology.

    If yes, try to download and store it in Invenio's cache directory.

    :param onto_url: URL of the remote ontology, or None.
    :param time_difference: optional timedelta grace period before a newer
        remote copy triggers a re-download (defaults to 1h10m).
    :return: False when onto_url is None, otherwise the path to the local
        (possibly freshly downloaded) ontology file.
    """
    if onto_url is None:
        return False
    # Download area inside the cache dir (or the system temp dir when
    # CFG_CACHEDIR is unset).
    dl_dir = ((config.CFG_CACHEDIR or tempfile.gettempdir()) + os.sep +
              "bibclassify" + os.sep)
    if not os.path.exists(dl_dir):
        os.mkdir(dl_dir)
    local_file = dl_dir + os.path.basename(onto_url)
    remote_modif_time = _get_last_modification_date(onto_url)
    try:
        local_modif_seconds = os.path.getmtime(local_file)
    except OSError:
        # The local file does not exist. Download the ontology.
        download = True
        log.info("The local ontology could not be found.")
    else:
        local_modif_time = datetime(*time.gmtime(local_modif_seconds)[0:6])
        # Let's set a time delta of 1 hour and 10 minutes.
        time_difference = time_difference or timedelta(hours=1, minutes=10)
        download = remote_modif_time > local_modif_time + time_difference
        if download:
            log.info("The remote ontology '%s' is more recent "
                     "than the local ontology." % onto_url)
    if download:
        # Best-effort: a failed download is logged and the (stale or
        # missing) local path is returned anyway.
        if not _download_ontology(onto_url, local_file):
            log.warning("Error downloading the ontology from: %s" % onto_url)
    return local_file
def _get_ontology(ontology):
    """Return the (name, path, url) to the short ontology name.

    Resolution order: existing file path, then known local locations via
    _discover_ontology, then (when not standalone) the clsMETHOD database
    table, downloading the remote copy if needed.

    :param ontology: name of the ontology or path to the file or url.
    :return: tuple (onto_name, onto_path, onto_url); elements are None
        when the ontology could not be resolved.
    """
    onto_name = onto_path = onto_url = None
    # first assume we got the path to the file
    if os.path.exists(ontology):
        onto_name = os.path.split(os.path.abspath(ontology))[1]
        onto_path = os.path.abspath(ontology)
        onto_url = ""
    else:
        # if not, try to find it in a known locations
        discovered_file = _discover_ontology(ontology)
        if discovered_file:
            onto_name = os.path.split(discovered_file)[1]
            onto_path = discovered_file
            # i know, this sucks
            # keep the original URL only if the name actually looks like one
            x = ontology.lower()
            if "http:" in x or "https:" in x or "ftp:" in x or "file:" in x:
                onto_url = ontology
            else:
                onto_url = ""
        else:
            # not found, look into a database
            # (it is last because when bibclassify
            # runs in a standalone mode,
            # it has no database - [rca, old-heritage]
            if not bconfig.STANDALONE:
                result = dbquery.run_sql("SELECT name, location from clsMETHOD WHERE name LIKE %s",
                                         ('%' + ontology + '%',))
                for onto_short_name, url in result:
                    onto_name = onto_short_name
                    onto_path = _get_remote_ontology(url)
                    onto_url = url
    return (onto_name, onto_path, onto_url)
def _discover_ontology(ontology_name):
    """Look for the file in a known places.

    Inside invenio/etc/bibclassify and a few other places
    like current directory.

    :param ontology_name: name or path name or url
    :type ontology_name: str
    :return: absolute path of a file if found, or None
    """
    last_part = os.path.split(os.path.abspath(ontology_name))[1]
    # Prefer the registered taxonomies (exact name, then name + '.rdf').
    if last_part in taxonomies:
        return taxonomies.get(last_part)
    elif last_part + ".rdf" in taxonomies:
        return taxonomies.get(last_part + ".rdf")
    else:
        log.debug("No taxonomy with pattern '%s' found" % ontology_name)
    # LEGACY
    # Fall back to scanning well-known directories for a case-insensitive
    # filename match.
    possible_patterns = [last_part, last_part.lower()]
    if not last_part.endswith('.rdf'):
        possible_patterns.append(last_part + '.rdf')
    places = [config.CFG_CACHEDIR,
              config.CFG_ETCDIR,
              os.path.join(config.CFG_CACHEDIR, "bibclassify"),
              os.path.join(config.CFG_ETCDIR, "bibclassify"),
              os.path.abspath('.'),
              os.path.abspath(os.path.join(os.path.dirname(__file__),
                                           "../../../etc/bibclassify")),
              os.path.join(os.path.dirname(__file__), "bibclassify"),
              config.CFG_WEBDIR]
    log.debug("Searching for taxonomy using string: %s" % last_part)
    log.debug("Possible patterns: %s" % possible_patterns)
    for path in places:
        try:
            if os.path.isdir(path):
                log.debug("Listing: %s" % path)
                for filename in os.listdir(path):
                    #log.debug('Testing: %s' % filename)
                    for pattern in possible_patterns:
                        filename_lc = filename.lower()
                        if pattern == filename_lc and\
                           os.path.exists(os.path.join(path, filename)):
                            filepath = os.path.abspath(os.path.join(path,
                                                                    filename))
                            # Only a readable match counts; otherwise keep
                            # looking in the remaining places.
                            if (os.access(filepath, os.R_OK)):
                                log.debug("Found taxonomy at: %s" % filepath)
                                return filepath
                            else:
                                log.warning('Found taxonony at: %s, but it is'
                                            ' not readable. '
                                            'Continue searching...'
                                            % filepath)
        except OSError, os_error_msg:
            # Python 2 except syntax; unreadable/vanished dirs are skipped.
            log.warning('OS Error when listing path '
                        '"%s": %s' % (str(path), str(os_error_msg)))
    log.debug("No taxonomy with pattern '%s' found" % ontology_name)
class KeywordToken:
    """KeywordToken is a class used for the extracted keywords.

    It can be initialized with values from RDF store or from
    simple strings. Specialty of this class is that objects are
    hashable by subject - so in the dictionary two objects with the
    same subject appears as one -- :see: self.__hash__ and self.__cmp__.
    """

    def __init__(self, subject, store=None, namespace=None, type='HEP'):
        """Initialize KeywordToken with a subject.

        :param subject: string or RDF object
        :param store: RDF graph object
            (will be used to get info about the subject)
        :param namespace: RDF namespace object, used together with store
        :param type: type of this keyword.
        """
        # full subject (may be an RDF URI); short_id is the fragment only
        self.id = subject
        self.type = type
        self.short_id = subject
        self.concept = ""              # human-readable label
        self.regex = []                # compiled regexes matching this kw
        self.nostandalone = False      # kw must not be reported alone
        self.spires = False            # SPIRES-style label, if any
        self.fieldcodes = []           # field codes parsed from 'fc:' notes
        self.compositeof = []          # component keywords (for composites)
        self.core = False              # kw flagged as 'core' in the taxonomy
        # True means composite keyword
        self._composite = '#Composite' in subject
        # NOTE: double underscore -> name-mangled to _KeywordToken__hash
        self.__hash = None
        # the tokens are coming possibly from a normal text file
        if store is None:
            subject = subject.strip()
            self.concept = subject
            self.regex = _get_searchable_regex(basic=[subject])
            self.nostandalone = False
            self.fieldcodes = []
            self.core = False
            # a multi-word plain-text keyword counts as composite
            if subject.find(' ') > -1:
                self._composite = True
        # definitions from rdf
        else:
            self.short_id = self.short_id.split('#')[-1]
            # find alternate names for this label
            basic_labels = []
            # turn those patterns into regexes only for simple keywords
            if self._composite is False:
                try:
                    for label in store.objects(subject,
                                               namespace["prefLabel"]):
                        # XXX shall i make it unicode?
                        basic_labels.append(str(label))
                except TypeError:
                    pass
                # NOTE(review): raises IndexError if the subject has no
                # prefLabel at all - presumably guaranteed by the caller,
                # which iterates subject_objects(prefLabel); confirm.
                self.concept = basic_labels[0]
            else:
                try:
                    self.concept = str(store.value(subject,
                                                   namespace["prefLabel"],
                                                   any=True))
                except KeyError:
                    log.warning("Keyword with subject %s has no prefLabel."
                                " We use raw name" %
                                self.short_id)
                    self.concept = self.short_id
            # this is common both to composite and simple keywords
            try:
                for label in store.objects(subject, namespace["altLabel"]):
                    basic_labels.append(str(label))
            except TypeError:
                pass
            # hidden labels are special (possibly regex) codes
            hidden_labels = []
            try:
                for label in store.objects(subject, namespace["hiddenLabel"]):
                    # Python 2 'unicode' builtin
                    hidden_labels.append(unicode(label))
            except TypeError:
                pass
            # compile regular expression that will identify this token
            self.regex = _get_searchable_regex(basic_labels, hidden_labels)
            # parse the 'note' annotations: core / nostandalone / fc:<code>
            try:
                for note in map(lambda s: str(s).lower().strip(),
                                store.objects(subject, namespace["note"])):
                    if note == 'core':
                        self.core = True
                    elif note in ("nostandalone", "nonstandalone"):
                        self.nostandalone = True
                    elif 'fc:' in note:
                        self.fieldcodes.append(note[3:].strip())
            except TypeError:
                pass
            # spiresLabel does not have multiple values
            spires_label = store.value(subject, namespace["spiresLabel"])
            if spires_label:
                self.spires = str(spires_label)
        # important for comparisons
        self.__hash = hash(self.short_id)
        # extract composite parts ids, ordered by their position inside
        # the subject string (so components keep their textual order)
        if store is not None and self.isComposite():
            small_subject = self.id.split("#Composite.")[-1]
            component_positions = []
            for label in store.objects(self.id, namespace["compositeOf"]):
                strlabel = str(label).split("#")[-1]
                component_name = label.split("#")[-1]
                component_positions.append((small_subject.find(component_name),
                                            strlabel))
            component_positions.sort()
            if not component_positions:
                log.error("Keyword is marked as composite, "
                          "but no composite components refs found: %s"
                          % self.short_id)
            else:
                self.compositeof = map(lambda x: x[1], component_positions)

    def refreshCompositeOf(self, single_keywords, composite_keywords,
                           store=None, namespace=None):
        """Re-check sub-parts of this keyword.

        Replaces the string labels in ``self.compositeof`` with the actual
        KeywordToken objects from the caches. This should be called after
        the whole RDF was processed, because it is using a cache of single
        keywords and if that one is incomplete, you will not identify all
        parts.
        """
        def _get_ckw_components(new_vals, label):
            # resolve a label into KeywordToken objects, recursing into
            # nested composite keywords
            if label in single_keywords:
                new_vals.append(single_keywords[label])
            elif ('Composite.%s' % label) in composite_keywords:
                for l in composite_keywords['Composite.%s' % label].compositeof:
                    _get_ckw_components(new_vals, l)
            elif label in composite_keywords:
                for l in composite_keywords[label].compositeof:
                    _get_ckw_components(new_vals, l)
            else:
                # One single or composite keyword is missing from the taxonomy.
                # This is due to an error in the taxonomy description.
                message = "The composite term \"%s\""\
                          " should be made of single keywords,"\
                          " but at least one is missing." % self.id
                if store is not None:
                    message += "Needed components: %s"\
                               % list(store.objects(self.id,
                                                    namespace["compositeOf"]))
                message += " Missing is: %s" % label
                raise TaxonomyError(message)

        if self.compositeof:
            new_vals = []
            try:
                for label in self.compositeof:
                    _get_ckw_components(new_vals, label)
                self.compositeof = new_vals
            except TaxonomyError:
                # the composites will be empty
                # (better than to have confusing, partial matches)
                self.compositeof = []
                log.error(
                    'We reset this composite keyword, so that it does not match anything. Please fix the taxonomy.')

    def isComposite(self):
        """Return value of _composite."""
        return self._composite

    def getComponents(self):
        """Return value of compositeof."""
        return self.compositeof

    def getType(self):
        """Return value of type."""
        return self.type

    def setType(self, value):
        """Set value of value."""
        self.type = value

    def __hash__(self):
        """Return _hash.

        This might change in the future but for the moment we want to
        think that if the concept is the same, then it is the same
        keyword - this sucks, but it is sort of how it is necessary
        to use now.
        """
        return self.__hash

    def __cmp__(self, other):
        """Compare objects using _hash.

        NOTE: __cmp__ is Python 2 only; it is ignored by Python 3.
        """
        if self.__hash < other.__hash__():
            return -1
        elif self.__hash == other.__hash__():
            return 0
        else:
            return 1

    def __str__(self, spires=False):
        """Return the best output for the keyword."""
        if spires:
            if self.spires:
                return self.spires
            elif self._composite:
                # SPIRES convention: composites use commas, not colons
                return self.concept.replace(':', ',')
        # default action
        return self.concept

    def output(self, spires=False):
        """Return string representation with spires value."""
        return self.__str__(spires=spires)

    def __repr__(self):
        """Class representation."""
        return "<KeywordToken: %s>" % self.short_id
def _build_cache(source_file, skip_cache=False):
    """Build the cached data.

    Either by parsing the RDF taxonomy file or a vocabulary file
    (one keyword per line).

    :param source_file: source file of the taxonomy, RDF file
    :param skip_cache: if True, build cache will not be
        saved (pickled) - it is saved as <source_file.db>
    :return: (single_keywords, composite_keywords) dicts keyed by short_id
    :raises TaxonomyError: if the cache dir is unusable or the vocabulary
        file yields no keywords
    """
    store = rdflib.ConjunctiveGraph()
    if skip_cache:
        log.info("You requested not to save the cache to disk.")
    else:
        cache_path = _get_cache_path(source_file)
        cache_dir = os.path.dirname(cache_path)
        # Make sure we have a cache_dir readable and writable.
        try:
            os.makedirs(cache_dir)
        except:
            # directory probably exists already; real problems are caught
            # by the access checks below
            pass
        if os.access(cache_dir, os.R_OK):
            if not os.access(cache_dir, os.W_OK):
                raise TaxonomyError("Cache directory exists but is not"
                                    " writable. Check your permissions"
                                    " for: %s" % cache_dir)
        else:
            raise TaxonomyError("Cache directory does not exist"
                                " (and could not be created): %s" % cache_dir)
    # time.clock() is Python 2 (removed in 3.8); wall/cpu time for logging
    timer_start = time.clock()
    namespace = None
    single_keywords, composite_keywords = {}, {}
    try:
        log.info("Building RDFLib's conjunctive graph from: %s" % source_file)
        try:
            store.parse(source_file)
        except urllib2.URLError:
            # rdflib treated the plain path as a URL; retry as file://
            if source_file[0] == '/':
                store.parse("file://" + source_file)
            else:
                store.parse("file:///" + source_file)
    except rdflib.exceptions.Error as e:
        log.error("Serious error reading RDF file")
        log.error(e)
        log.error(traceback.format_exc())
        raise rdflib.exceptions.Error(e)
    except (xml.sax.SAXParseException, ImportError) as e:
        # File is not a RDF file. We assume it is a controlled vocabulary.
        log.error(e)
        log.warning("The ontology file is probably not a valid RDF file. \
Assuming it is a controlled vocabulary file.")
        filestream = open(source_file, "r")
        for line in filestream:
            keyword = line.strip()
            kt = KeywordToken(keyword)
            single_keywords[kt.short_id] = kt
        if not len(single_keywords):
            raise TaxonomyError('The ontology file is not well formated')
    else:  # ok, no exception happened
        log.info("Now building cache of keywords")
        # File is a RDF file.
        namespace = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
        single_count = 0
        composite_count = 0
        # every subject carrying a prefLabel becomes a keyword token
        subject_objects = store.subject_objects(namespace["prefLabel"])
        for subject, pref_label in subject_objects:
            kt = KeywordToken(subject, store=store, namespace=namespace)
            if kt.isComposite():
                composite_count += 1
                composite_keywords[kt.short_id] = kt
            else:
                single_keywords[kt.short_id] = kt
                single_count += 1
    cached_data = {}
    cached_data["single"] = single_keywords
    cached_data["composite"] = composite_keywords
    cached_data["creation_time"] = time.gmtime()
    # version stamp lets _get_cache invalidate stale pickles
    cached_data["version_info"] = {'rdflib': rdflib.__version__,
                                   'bibclassify': bconfig.VERSION}
    log.debug("Building taxonomy... %d terms built in %.1f sec." %
              (len(single_keywords) + len(composite_keywords),
               time.clock() - timer_start))
    log.info("Total count of single keywords: %d "
             % len(single_keywords))
    log.info("Total count of composite keywords: %d "
             % len(composite_keywords))
    if not skip_cache:
        cache_path = _get_cache_path(source_file)
        cache_dir = os.path.dirname(cache_path)
        log.debug("Writing the cache into: %s" % cache_path)
        # test again, it could have changed
        if os.access(cache_dir, os.R_OK):
            if os.access(cache_dir, os.W_OK):
                # Serialize.
                filestream = None
                try:
                    filestream = open(cache_path, "wb")
                except IOError as msg:
                    # Impossible to write the cache.
                    log.error("Impossible to write cache to '%s'."
                              % cache_path)
                    log.error(msg)
                else:
                    log.debug("Writing cache to file %s" % cache_path)
                    # protocol 1: binary, Py2-compatible
                    cPickle.dump(cached_data, filestream, 1)
                if filestream:
                    filestream.close()
            else:
                raise TaxonomyError("Cache directory exists but is not "
                                    "writable. Check your permissions "
                                    "for: %s" % cache_dir)
        else:
            raise TaxonomyError("Cache directory does not exist"
                                " (and could not be created): %s" % cache_dir)
    # now when the whole taxonomy was parsed,
    # find sub-components of the composite kws
    # it is important to keep this call after the taxonomy was saved,
    # because we don't want to pickle regexes multiple times
    # (as they are must be re-compiled at load time)
    for kt in composite_keywords.values():
        kt.refreshCompositeOf(single_keywords, composite_keywords,
                              store=store, namespace=namespace)
    # house-cleaning
    if store:
        store.close()
    return (single_keywords, composite_keywords)
def _capitalize_first_letter(word):
"""Return a regex pattern with the first letter.
Accepts both lowercase and uppercase.
"""
if word[0].isalpha():
# These two cases are necessary in order to get a regex pattern
# starting with '[xX]' and not '[Xx]'. This allows to check for
# colliding regex afterwards.
if word[0].isupper():
return "[" + word[0].swapcase() + word[0] + "]" + word[1:]
else:
return "[" + word[0] + word[0].swapcase() + "]" + word[1:]
return word
def _convert_punctuation(punctuation, conversion_table):
"""Return a regular expression for a punctuation string."""
if punctuation in conversion_table:
return conversion_table[punctuation]
return re.escape(punctuation)
def _convert_word(word):
    """Return a regex pattern matching *word* including its plural form.

    Acronyms (all-uppercase) get an optional trailing ``s``; title-cased
    words an optional ``'s``; words containing digits are left untouched.
    All other words run through the invariable-word list, the exception
    tables and the stemming regexes from bconfig, falling back to an
    optional trailing ``s?``.
    """
    out = None
    # Acronyms.
    if word.isupper():
        out = word + "s?"
    # Proper nouns or word with digits.
    elif word.istitle():
        out = word + "('?s)?"
    elif _contains_digit.search(word):
        out = word
    if out is not None:
        return out
    # Words with non or anti prefixes: recurse on the stem.
    if _starts_with_non.search(word):
        word = "non-?" + _capitalize_first_letter(_convert_word(word[3:]))
    elif _starts_with_anti.search(word):
        word = "anti-?" + _capitalize_first_letter(_convert_word(word[4:]))
    # (A second "if out is not None: return ..." check used to live here;
    # it was unreachable because every path that sets out returns above.)
    # A few invariable words.
    if word in bconfig.CFG_BIBCLASSIFY_INVARIABLE_WORDS:
        return _capitalize_first_letter(word)
    # Some exceptions that would not produce good results with the set of
    # general_regular_expressions.
    regexes = bconfig.CFG_BIBCLASSIFY_EXCEPTIONS
    if word in regexes:
        return _capitalize_first_letter(regexes[word])
    regexes = bconfig.CFG_BIBCLASSIFY_UNCHANGE_REGULAR_EXPRESSIONS
    for regex in regexes:
        if regex.search(word) is not None:
            return _capitalize_first_letter(word)
    regexes = bconfig.CFG_BIBCLASSIFY_GENERAL_REGULAR_EXPRESSIONS
    for regex, replacement in regexes:
        stemmed = regex.sub(replacement, word)
        if stemmed != word:
            return _capitalize_first_letter(stemmed)
    return _capitalize_first_letter(word + "s?")
def _get_cache(cache_file, source_file=None):
    """Get cached taxonomy using the cPickle module.

    No check is done at that stage.

    :param cache_file: full path to the file holding pickled data
    :param source_file: if we discover the cache is obsolete, we
        will build a new cache, therefore we need the source path
        of the cache
    :return: (single_keywords, composite_keywords).
    """
    timer_start = time.clock()
    filestream = open(cache_file, "rb")
    try:
        cached_data = cPickle.load(filestream)
        version_info = cached_data['version_info']
        # a version mismatch is funnelled through KeyError on purpose,
        # so it shares the "obsolete cache" handler below
        if version_info['rdflib'] != rdflib.__version__\
                or version_info['bibclassify'] != bconfig.VERSION:
            raise KeyError
    except (cPickle.UnpicklingError, ImportError,
            AttributeError, DeprecationWarning, EOFError):
        # unreadable/corrupted cache: delete and rebuild from source
        log.warning("The existing cache in %s is not readable. "
                    "Removing and rebuilding it." % cache_file)
        filestream.close()
        os.remove(cache_file)
        return _build_cache(source_file)
    except KeyError:
        log.warning("The existing cache %s is not up-to-date. "
                    "Removing and rebuilding it." % cache_file)
        filestream.close()
        os.remove(cache_file)
        if source_file and os.path.exists(source_file):
            return _build_cache(source_file)
        else:
            # NOTE(review): when the source is missing we fall through and
            # keep using the just-deleted (obsolete) cached_data below -
            # presumably a deliberate best-effort; confirm before changing.
            log.error("The cache contains obsolete data (and it was deleted), "
                      "however I can't build a new cache, the source does not "
                      "exist or is inaccessible! - %s" % source_file)
    # close() is idempotent, so the second close on error paths is harmless
    filestream.close()
    single_keywords = cached_data["single"]
    composite_keywords = cached_data["composite"]
    # the cache contains only keys of the composite keywords, not the objects
    # so now let's resolve them into objects
    for kw in composite_keywords.values():
        kw.refreshCompositeOf(single_keywords, composite_keywords)
    log.debug("Retrieved taxonomy from cache %s created on %s" %
              (cache_file, time.asctime(cached_data["creation_time"])))
    log.debug("%d terms read in %.1f sec." %
              (len(single_keywords) + len(composite_keywords),
               time.clock() - timer_start))
    return (single_keywords, composite_keywords)
def _get_cache_path(source_file):
    """Return the path where the cache should be written/located.

    :param source_file: name of the ontology or the full path
    :return: string, abs path to the cache file in CFG_CACHEDIR/bibclassify
    """
    local_name = os.path.basename(source_file)
    cache_name = local_name + ".db"
    cache_dir = os.path.join(config.CFG_CACHEDIR, "bibclassify")
    # Create the directory unconditionally and tolerate "already exists":
    # the previous isdir()-then-makedirs() sequence was racy when several
    # processes started at the same time.
    try:
        os.makedirs(cache_dir)
    except OSError:
        if not os.path.isdir(cache_dir):
            raise
    return os.path.abspath(os.path.join(cache_dir, cache_name))
def _get_last_modification_date(url):
    """Get the last modification date of the ontology.

    Issues an HTTP HEAD request and parses the ``Last-Modified`` header.

    :param url: location of the remote ontology
    :return: datetime built from the header's timestamp
    """
    request = urllib2.Request(url)
    # turn the GET into a HEAD so only headers are transferred
    request.get_method = lambda: "HEAD"
    http_file = urlopen(request)
    try:
        date_string = http_file.headers["last-modified"]
    finally:
        # the response was never closed before, leaking the socket
        http_file.close()
    parsed = time.strptime(date_string, "%a, %d %b %Y %H:%M:%S %Z")
    return datetime(*(parsed)[0:6])
def _download_ontology(url, local_file):
    """Download the ontology and store it in CFG_CACHEDIR.

    :param url: remote location of the ontology
    :param local_file: full destination path
    :return: True on success, False on any download/write failure
    """
    log.debug("Copying remote ontology '%s' to file '%s'." % (url,
                                                              local_file))
    try:
        url_desc = urlopen(url)
        try:
            file_desc = open(local_file, 'w')
            try:
                file_desc.write(url_desc.read())
            finally:
                # close even when the write fails (was leaked on error)
                file_desc.close()
        finally:
            # the response object was never closed before
            url_desc.close()
    except IOError as e:
        print(e)
        return False
    except Exception:
        # was a bare "except:" - narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; still best-effort for everything else
        log.warning("Unable to download the ontology. '%s'" %
                    sys.exc_info()[0])
        return False
    else:
        log.debug("Done copying.")
        return True
def _get_searchable_regex(basic=None, hidden=None):
    """Return the searchable regular expressions for the single keyword.

    *basic* labels (preferred/alternative) get the plural/stemming
    treatment; *hidden* labels may either be explicit regexes (wrapped in
    slashes) used verbatim, or plain strings treated like basic labels.
    """
    basic = basic or []
    hidden = hidden or []

    def _wrap(pattern):
        # anchor the pattern with the configured word-boundary wrapper
        return re.compile(bconfig.CFG_BIBCLASSIFY_WORD_WRAP % pattern)

    hidden_regex_dict = {}
    for hidden_label in hidden:
        if _is_regex(hidden_label):
            # strip the surrounding slashes and use the regex as-is
            hidden_regex_dict[hidden_label] = _wrap(hidden_label[1:-1])
        else:
            hidden_regex_dict[hidden_label] = _wrap(
                _get_regex_pattern(hidden_label))

    # Plural/stemmed patterns for the basic labels.
    regex_dict = {}
    for label in basic:
        regex_dict[label] = _wrap(_get_regex_pattern(label))

    # Merge; a hidden label overrides a basic label with the same text.
    regex_dict.update(hidden_regex_dict)
    return regex_dict.values()
def _get_regex_pattern(label):
    """Return a regular expression of the label.

    This takes care of plural and different kinds of separators.
    """
    # _split_by_punctuation uses a capturing group, so the result
    # alternates word (even index) / punctuation (odd index).
    pieces = _split_by_punctuation.split(label)
    converted = []
    for idx, piece in enumerate(pieces):
        if idx % 2 == 0:
            # Word: stem it unless it is a digit or a single character.
            if not piece.isdigit() and len(piece) > 1:
                piece = _convert_word(piece)
        elif not pieces[idx + 1]:
            # Trailing punctuation (no word follows): treat as a symbol.
            piece = _convert_punctuation(
                piece, bconfig.CFG_BIBCLASSIFY_SYMBOLS)
        else:
            # Punctuation between words: treat as a separator.
            piece = _convert_punctuation(
                piece, bconfig.CFG_BIBCLASSIFY_SEPARATORS)
        converted.append(piece)
    return "".join(converted)
def _is_regex(string):
"""Check if a concept is a regular expression."""
return string[0] == "/" and string[-1] == "/"
def check_taxonomy(taxonomy):
    """Check the consistency of the taxonomy.

    Outputs a list of errors and warnings to stdout.

    :param taxonomy: path/URL of the RDF taxonomy to check
    :raises TaxonomyError: if the file cannot be parsed as RDF
    NOTE(review): this function calls sys.exit(0) at the end, so it never
    returns to the caller - confirm that is intended before reusing it.
    """
    log.info("Building graph with Python RDFLib version %s" %
             rdflib.__version__)
    store = rdflib.ConjunctiveGraph()
    try:
        store.parse(taxonomy)
    except:
        log.error("The taxonomy is not a valid RDF file. Are you "
                  "trying to check a controlled vocabulary?")
        raise TaxonomyError('Error in RDF file')
    log.info("Graph was successfully built.")
    # short names of the SKOS/HEP predicates we reason about
    prefLabel = "prefLabel"
    hiddenLabel = "hiddenLabel"
    altLabel = "altLabel"
    composite = "composite"
    compositeOf = "compositeOf"
    note = "note"
    both_skw_and_ckw = []
    # Build a dictionary we will reason on later.
    uniq_subjects = {}
    for subject in store.subjects():
        uniq_subjects[subject] = None
    # subjects: short-name -> {predicate short-name: [object short-names]}
    subjects = {}
    for subject in uniq_subjects:
        strsubject = str(subject).split("#Composite.")[-1]
        strsubject = strsubject.split("#")[-1]
        if (strsubject == "http://cern.ch/thesauri/HEPontology.rdf" or
                strsubject == "compositeOf"):
            continue
        components = {}
        for predicate, value in store.predicate_objects(subject):
            strpredicate = str(predicate).split("#")[-1]
            strobject = str(value).split("#Composite.")[-1]
            strobject = strobject.split("#")[-1]
            components.setdefault(strpredicate, []).append(strobject)
        if strsubject in subjects:
            both_skw_and_ckw.append(strsubject)
        else:
            subjects[strsubject] = components
    log.info("Taxonomy contains %s concepts." % len(subjects))
    # accumulators for the report printed at the end
    no_prefLabel = []
    multiple_prefLabels = []
    bad_notes = []
    # Subjects with no composite or compositeOf predicate
    # NOTE(review): 'lonely' is collected but never reported below.
    lonely = []
    both_composites = []
    bad_hidden_labels = {}
    bad_alt_labels = {}
    # Problems with composite keywords
    composite_problem1 = []
    composite_problem2 = []
    composite_problem3 = []
    composite_problem4 = {}
    composite_problem5 = []
    composite_problem6 = []
    stemming_collisions = []
    interconcept_collisions = {}
    for subject, predicates in iteritems(subjects):
        # No prefLabel or multiple prefLabels
        try:
            if len(predicates[prefLabel]) > 1:
                multiple_prefLabels.append(subject)
        except KeyError:
            no_prefLabel.append(subject)
        # Lonely and both composites.
        if composite not in predicates and compositeOf not in predicates:
            lonely.append(subject)
        elif composite in predicates and compositeOf in predicates:
            both_composites.append(subject)
        # Multiple or bad notes
        if note in predicates:
            bad_notes += [(subject, n) for n in predicates[note]
                          if n not in ('nostandalone', 'core')]
        # Bad hidden labels: slash on only one side (XOR)
        if hiddenLabel in predicates:
            for lbl in predicates[hiddenLabel]:
                if lbl.startswith("/") ^ lbl.endswith("/"):
                    bad_hidden_labels.setdefault(subject, []).append(lbl)
        # Bad alt labels: look like regexes or contain colons
        if altLabel in predicates:
            for lbl in predicates[altLabel]:
                if len(re.findall("/", lbl)) >= 2 or ":" in lbl:
                    bad_alt_labels.setdefault(subject, []).append(lbl)
        # Check composite: referenced CKWs must exist and point back
        if composite in predicates:
            for ckw in predicates[composite]:
                if ckw in subjects:
                    if compositeOf in subjects[ckw]:
                        if subject not in subjects[ckw][compositeOf]:
                            composite_problem3.append((subject, ckw))
                    else:
                        if ckw not in both_skw_and_ckw:
                            composite_problem2.append((subject, ckw))
                else:
                    composite_problem1.append((subject, ckw))
        # Check compositeOf: referenced SKWs must exist and point back
        if compositeOf in predicates:
            for skw in predicates[compositeOf]:
                if skw in subjects:
                    if composite in subjects[skw]:
                        if subject not in subjects[skw][composite]:
                            composite_problem6.append((subject, skw))
                    else:
                        if skw not in both_skw_and_ckw:
                            composite_problem5.append((subject, skw))
                else:
                    composite_problem4.setdefault(skw, []).append(subject)
        # Check for stemmed labels: two labels of one concept that stem
        # to the same regex pattern are redundant
        if compositeOf in predicates:
            labels = (altLabel, hiddenLabel)
        else:
            labels = (prefLabel, altLabel, hiddenLabel)
        patterns = {}
        for label in [lbl for lbl in labels if lbl in predicates]:
            for expression in [expr for expr in predicates[label]
                               if not _is_regex(expr)]:
                pattern = _get_regex_pattern(expression)
                interconcept_collisions.setdefault(pattern, []).\
                    append((subject, label))
                if pattern in patterns:
                    stemming_collisions.append(
                        (subject,
                         patterns[pattern],
                         (label, expression)
                         )
                    )
                else:
                    patterns[pattern] = (label, expression)
    print("\n==== ERRORS ====")
    if no_prefLabel:
        print("\nConcepts with no prefLabel: %d" % len(no_prefLabel))
        print("\n".join([" %s" % subj for subj in no_prefLabel]))
    if multiple_prefLabels:
        print(("\nConcepts with multiple prefLabels: %d" %
               len(multiple_prefLabels)))
        print("\n".join([" %s" % subj for subj in multiple_prefLabels]))
    if both_composites:
        print(("\nConcepts with both composite properties: %d" %
               len(both_composites)))
        print("\n".join([" %s" % subj for subj in both_composites]))
    if bad_hidden_labels:
        print("\nConcepts with bad hidden labels: %d" % len(bad_hidden_labels))
        for kw, lbls in iteritems(bad_hidden_labels):
            print(" %s:" % kw)
            print("\n".join([" '%s'" % lbl for lbl in lbls]))
    if bad_alt_labels:
        print("\nConcepts with bad alt labels: %d" % len(bad_alt_labels))
        for kw, lbls in iteritems(bad_alt_labels):
            print(" %s:" % kw)
            print("\n".join([" '%s'" % lbl for lbl in lbls]))
    if both_skw_and_ckw:
        print(("\nKeywords that are both skw and ckw: %d" %
               len(both_skw_and_ckw)))
        print("\n".join([" %s" % subj for subj in both_skw_and_ckw]))
    print()
    if composite_problem1:
        print("\n".join(["SKW '%s' references an unexisting CKW '%s'." %
                         (skw, ckw) for skw, ckw in composite_problem1]))
    if composite_problem2:
        print("\n".join(["SKW '%s' references a SKW '%s'." %
                         (skw, ckw) for skw, ckw in composite_problem2]))
    if composite_problem3:
        print("\n".join(["SKW '%s' is not composite of CKW '%s'." %
                         (skw, ckw) for skw, ckw in composite_problem3]))
    if composite_problem4:
        for skw, ckws in iteritems(composite_problem4):
            print("SKW '%s' does not exist but is " "referenced by:" % skw)
            print("\n".join([" %s" % ckw for ckw in ckws]))
    if composite_problem5:
        print("\n".join(["CKW '%s' references a CKW '%s'." % kw
                         for kw in composite_problem5]))
    if composite_problem6:
        print("\n".join(["CKW '%s' is not composed by SKW '%s'." % kw
                         for kw in composite_problem6]))
    print("\n==== WARNINGS ====")
    if bad_notes:
        print(("\nConcepts with bad notes: %d" % len(bad_notes)))
        print("\n".join([" '%s': '%s'" % _note for _note in bad_notes]))
    if stemming_collisions:
        print("\nFollowing keywords have unnecessary labels that have "
              "already been generated by BibClassify.")
        for subj in stemming_collisions:
            print(" %s:\n %s\n and %s" % subj)
    print("\nFinished.")
    sys.exit(0)
def test_cache(taxonomy_name='HEP', rebuild_cache=False, no_cache=False):
    """Test the cache lookup.

    Ensures the regex cache for *taxonomy_name* is populated, then
    returns it together with the current thread id.
    """
    cache = get_cache(taxonomy_name)
    if not cache:
        # populate the cache first, then read it back
        regexes = get_regular_expressions(taxonomy_name,
                                          rebuild=rebuild_cache,
                                          no_cache=no_cache)
        set_cache(taxonomy_name, regexes)
        cache = get_cache(taxonomy_name)
    return (thread.get_ident(), cache)
# Module import side effect: record that this module finished loading.
log.info('Loaded ontology reader')

if __name__ == '__main__':
    # Ad-hoc manual check of the cache machinery when run as a script.
    test_cache()
| zenodo/invenio | invenio/legacy/bibclassify/ontology_reader.py | Python | gpl-2.0 | 44,242 |
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import copy
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from uuid import uuid4
from ._compat import PY2, pickle, hashlib_md5, pjoin, copyreg, integer_types, \
with_metaclass
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import Serializable, SQLCallableList, BasicStorage
from .helpers.methods import hide_password, smart_query, auto_validators, \
auto_represent
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME, \
REGEX_SEARCH_PATTERN, REGEX_SQUARE_BRACKETS
from .helpers.serializers import serializers
from .objects import Table, Field, Row, Set
from .adapters import ADAPTERS
from .adapters.base import BaseAdapter
long = integer_types[-1]
# NOTE(review): appears to be the set of keyword arguments accepted when
# defining a table - confirm against its usage site (not visible here).
TABLE_ARGS = set(
    ('migrate', 'primarykey', 'fake_migrate', 'format', 'redefine',
     'singular', 'plural', 'trigger_name', 'sequence_name', 'fields',
     'common_filter', 'polymodel', 'table_class', 'on_define', 'rname'))
class MetaDAL(type):
    """Metaclass that intercepts selected keyword arguments at
    instantiation time and installs them as class-level attributes
    before the instance is actually created."""

    def __call__(cls, *args, **kwargs):
        #: keyword arguments that customise the DAL class on call
        hooks = ('logger', 'representers', 'serializers', 'uuid',
                 'validators', 'validators_method')
        for hook in hooks:
            value = kwargs.get(hook)
            if value:
                # move the hook from the kwargs onto the class itself
                setattr(cls, hook, value)
                del kwargs[hook]
        return super(MetaDAL, cls).__call__(*args, **kwargs)
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
        table_hash: database identifier with .tables. If your connection hash
            changes, you can still use the old .tables files if they have
            db_hash as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
        lazy_tables: delays table definition until table access
        after_connection: can be a callable that will be executed after the
            connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
validators_method = None
representers = {}
uuid = lambda x: str(uuid4())
logger = logging.getLogger("pyDAL")
Table = Table
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
if not hasattr(THREAD_LOCAL, 'db_instances'):
THREAD_LOCAL.db_instances = {}
if not hasattr(THREAD_LOCAL, 'db_instances_zombie'):
THREAD_LOCAL.db_instances_zombie = {}
if uri == '<zombie>':
db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL.db_instances_zombie[db_uid] = db
else:
db_uid = kwargs.get('db_uid', hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
del THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL.db_instances.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL.db_instances[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats = [(row[0], row[1]) for row in db._timings],
dbtables = {'defined': sorted(
list(set(db.tables)-set(db._LAZY_TABLES.keys()))),
'lazy': sorted(db._LAZY_TABLES.keys())})
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
instances = enumerate(instances)
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not suported by %s' % db._dbname)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not suported by %s' % db._dbanme)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError('failure to commit distributed transaction')
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(self, uri='sqlite://dummy.db',
pool_size=0, folder=None,
db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
migrate_enabled=True, fake_migrate_all=False,
decode_credentials=False, driver_args=None,
adapter_args=None, attempts=5, auto_import=False,
bigint_id=False, debug=False, lazy_tables=False,
db_uid=None, do_connect=True,
after_connection=None, tables=None, ignore_field_case=True,
entity_quoting=False, table_hash=None):
if uri == '<zombie>' and db_uid is not None:
return
super(DAL, self).__init__()
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: urllib.unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._lastsql = ''
self._timings = []
self._pending_references = {}
self._request_tenant = 'request_tenant'
self._common_fields = []
self._referee_name = '%(table)s'
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._do_connect = do_connect
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
error = ''
connected = False
for k in range(attempts):
for uri in uris:
try:
if is_jdbc and not uri.startswith('jdbc:'):
uri = 'jdbc:'+uri
self._dbname = REGEX_DBNAME.match(uri).group()
if not self._dbname in ADAPTERS:
raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
# notice that driver args or {} else driver_args
# defaults to {} global, not correct
kwargs = dict(db=self,uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
do_connect=do_connect,
after_connection=after_connection,
entity_quoting=entity_quoting)
self._adapter = ADAPTERS[self._dbname](**kwargs)
types = ADAPTERS[self._dbname].types
# copy so multiple DAL() possible
self._adapter.types = copy.copy(types)
self._adapter.build_parsemap()
self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
if 'big-id' in types and 'reference' in types:
self._adapter.types['id'] = types['big-id']
self._adapter.types['reference'] = types['big-reference']
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
else:
self._adapter = BaseAdapter(db=self,pool_size=0,
uri='None',folder=folder,
db_codec=db_codec, after_connection=after_connection,
entity_quoting=entity_quoting)
migrate = fake_migrate = False
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
self.check_reserved = check_reserved
if self.check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder,
tables=tables)
@property
def tables(self):
return self._tables
def import_table_definitions(self, path, migrate=False,
fake_migrate=False, tables=None):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path,self._uri_hash+'_*.table')
for filename in glob.glob(pattern):
tfile = self._adapter.file_open(filename, 'r')
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern)-7:-6]
mf = [(value['sortable'],
Field(key,
type=value['type'],
length=value.get('length',None),
notnull=value.get('notnull',False),
unique=value.get('unique',False))) \
for key, value in sql_fields.iteritems()]
mf.sort(lambda a,b: cmp(a[0],b[0]))
self.define_table(name,*[item[1] for item in mf],
**dict(migrate=migrate,
fake_migrate=fake_migrate))
finally:
self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL keywords
Uses self.check_reserve which is a list of operators to use.
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
"""
Example:
Use as::
db.define_table('person',Field('name'),Field('info'))
db.define_table('pet',
Field('ownedby',db.person),
Field('name'),Field('info')
)
@request.restful()
def index():
def GET(*args,**vars):
patterns = [
"/friends[person]",
"/{person.name}/:field",
"/{person.name}/pets[pet.ownedby]",
"/{person.name}/pets[pet.ownedby]/{pet.name}",
"/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
("/dogs[pet]", db.pet.info=='dog'),
("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
]
parser = db.parse_as_rest(patterns,args,vars)
if parser.status == 200:
return dict(content=parser.response)
else:
raise HTTP(parser.status,parser.error)
def POST(table_name,**vars):
if table_name == 'person':
return db.person.validate_and_insert(**vars)
elif table_name == 'pet':
return db.pet.validate_and_insert(**vars)
else:
raise HTTP(400)
return locals()
"""
db = self
re1 = REGEX_SEARCH_PATTERN
re2 = REGEX_SQUARE_BRACKETS
def auto_table(table,base='',depth=0):
patterns = []
for field in db[table].fields:
if base:
tag = '%s/%s' % (base,field.replace('_','-'))
else:
tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
f = db[table][field]
if not f.readable: continue
if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('boolean'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('float','double','integer','bigint'):
tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('list:'):
tag += '/{%s.%s.contains}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('date','datetime'):
tag+= '/{%s.%s.year}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.month}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.day}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if f.type in ('datetime','time'):
tag+= '/{%s.%s.hour}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.minute}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.second}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if depth>0:
for f in db[table]._referenced_by:
tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
patterns.append(tag)
patterns += auto_table(table,base=tag,depth=depth-1)
return patterns
if patterns == 'auto':
patterns=[]
for table in db.tables:
if not table.startswith('auth_'):
patterns.append('/%s[%s]' % (table,table))
patterns += auto_table(table,base='',depth=1)
else:
i = 0
while i<len(patterns):
pattern = patterns[i]
if not isinstance(pattern,str):
pattern = pattern[0]
tokens = pattern.split('/')
if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
'/'.join(tokens[:-1]))
patterns = patterns[:i]+new_patterns+patterns[i+1:]
i += len(new_patterns)
else:
i += 1
if '/'.join(args) == 'patterns':
return Row({'status':200,'pattern':'list',
'error':None,'response':patterns})
for pattern in patterns:
basequery, exposedfields = None, []
if isinstance(pattern,tuple):
if len(pattern)==2:
pattern, basequery = pattern
elif len(pattern)>2:
pattern, basequery, exposedfields = pattern[0:3]
otable=table=None
if not isinstance(queries,dict):
dbset=db(queries)
if basequery is not None:
dbset = dbset(basequery)
i=0
tags = pattern[1:].split('/')
if len(tags)!=len(args):
continue
for tag in tags:
if re1.match(tag):
# print 're1:'+tag
tokens = tag[1:-1].split('.')
table, field = tokens[0], tokens[1]
if not otable or table == otable:
if len(tokens)==2 or tokens[2]=='eq':
query = db[table][field]==args[i]
elif tokens[2]=='ne':
query = db[table][field]!=args[i]
elif tokens[2]=='lt':
query = db[table][field]<args[i]
elif tokens[2]=='gt':
query = db[table][field]>args[i]
elif tokens[2]=='ge':
query = db[table][field]>=args[i]
elif tokens[2]=='le':
query = db[table][field]<=args[i]
elif tokens[2]=='year':
query = db[table][field].year()==args[i]
elif tokens[2]=='month':
query = db[table][field].month()==args[i]
elif tokens[2]=='day':
query = db[table][field].day()==args[i]
elif tokens[2]=='hour':
query = db[table][field].hour()==args[i]
elif tokens[2]=='minute':
query = db[table][field].minutes()==args[i]
elif tokens[2]=='second':
query = db[table][field].seconds()==args[i]
elif tokens[2]=='startswith':
query = db[table][field].startswith(args[i])
elif tokens[2]=='contains':
query = db[table][field].contains(args[i])
else:
raise RuntimeError("invalid pattern: %s" % pattern)
if len(tokens)==4 and tokens[3]=='not':
query = ~query
elif len(tokens)>=4:
raise RuntimeError("invalid pattern: %s" % pattern)
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
if basequery is not None:
dbset = dbset(basequery)
dbset=dbset(query)
else:
raise RuntimeError("missing relation in pattern: %s" % pattern)
elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
ref = tag[tag.find('[')+1:-1]
if '.' in ref and otable:
table,field = ref.split('.')
selfld = '_id'
if db[table][field].type.startswith('reference '):
refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
else:
refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
if refs:
selfld = refs[0]
if nested_select:
try:
dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
else:
items = [item.id for item in dbset.select(db[otable][selfld])]
dbset=db(db[table][field].belongs(items))
else:
table = ref
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
dbset=dbset(db[table])
elif tag==':field' and table:
# print 're3:'+tag
field = args[i]
if not field in db[table]: break
# hand-built patterns should respect .readable=False as well
if not db[table][field].readable:
return Row({'status':418,'pattern':pattern,
'error':'I\'m a teapot','response':None})
try:
distinct = vars.get('distinct', False) == 'True'
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
if items:
return Row({'status':200,'response':items,
'pattern':pattern})
else:
return Row({'status':404,'pattern':pattern,
'error':'no record found','response':None})
elif tag != args[i]:
break
otable = table
i += 1
if i == len(tags) and table:
if hasattr(db[table], '_id'):
ofields = vars.get('order', db[table]._id.name).split('|')
else:
ofields = vars.get('order', db[table]._primarykey[0]).split('|')
try:
orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
except (KeyError, AttributeError):
return Row({'status':400,'error':'invalid orderby','response':None})
if exposedfields:
fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
else:
fields = [field for field in db[table] if field.readable]
count = dbset.count()
try:
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
#if count > limits[1]-limits[0]:
# return Row({'status':400,'error':'too many records','response':None})
try:
response = dbset.select(limitby=limits,orderby=orderby,*fields)
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
return Row({'status':200,'response':response,
'pattern':pattern,'count':count})
return Row({'status':400,'error':'no matching pattern','response':None})
def define_table(
self,
tablename,
*fields,
**args
):
if not fields and 'fields' in args:
fields = args.get('fields',())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
elif hasattr(self,tablename) or tablename in self.tables:
if args.get('redefine',False):
delattr(self, tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
elif tablename.startswith('_') or hasattr(self,tablename) or \
REGEX_PYTHON_KEYWORDS.match(tablename):
raise SyntaxError('invalid table name: %s' % tablename)
elif self.check_reserved:
self.check_reserved_keyword(tablename)
else:
invalid_args = set(args)-TABLE_ARGS
if invalid_args:
raise SyntaxError('invalid table "%s" attributes: %s' \
% (tablename,invalid_args))
if self._lazy_tables and tablename not in self._LAZY_TABLES:
self._LAZY_TABLES[tablename] = (tablename,fields,args)
table = None
else:
table = self.lazy_define_table(tablename,*fields,**args)
if not tablename in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(
self,
tablename,
*fields,
**args
):
args_get = args.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + list(common_fields)
table_class = args_get('table_class',Table)
table = table_class(self, tablename, *fields, **args)
table._actual = True
self[tablename] = table
# must follow above line to handle self references
table._create_references()
for field in table:
if field.requires == DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and args_get('migrate',self._migrate)
if migrate and not self._uri in (None,'None') \
or self._adapter.dbengine=='google:datastore':
fake_migrate = self._fake_migrate_all or \
args_get('fake_migrate',self._fake_migrate)
polymodel = args_get('polymodel',None)
try:
GLOBAL_LOCKER.acquire()
self._lastsql = self._adapter.create_table(
table,migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = args_get('on_define',None)
if on_define: on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[(k, getattr(self, "_" + k, None)) for k in [
'pool_size', 'folder', 'db_codec', 'check_reserved',
'migrate', 'fake_migrate', 'migrate_enabled',
'fake_migrate_all', 'decode_credentials', 'driver_args',
'adapter_args', 'attempts', 'bigint_id', 'debug',
'lazy_tables', 'do_connect']]))
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat,
sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(self, '_lazy_tables') and \
key in object.__getattribute__(self, '_LAZY_TABLES'):
tablename, fields, args = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **args)
return super(DAL, self).__getattr__(key)
def __setattr__(self, key, value):
if key[:1] != '_' and key in self:
raise SyntaxError(
'Object %s exists and cannot be redefined' % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, '_uri'):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query!=None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf: ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
def rollback(self):
self._adapter.rollback()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL.db_instances[self._db_uid]
def executesql(self, query, placeholders=None, as_dict=False,
fields=None, colnames=None, as_ordered_dict=False):
"""
Executes an arbitrary query
Args:
query (str): the query to submit to the backend
placeholders: is optional and will always be None.
If using raw SQL with placeholders, placeholders may be
a sequence of values to be substituted in
or, (if supported by the DB driver), a dictionary with keys
matching named placeholders in your SQL.
as_dict: will always be None when using DAL.
If using raw SQL can be set to True and the results cursor
returned by the DB driver will be converted to a sequence of
dictionaries keyed with the db field names. Results returned
with as_dict=True are the same as those returned when applying
.to_list() to a DAL query. If "as_ordered_dict"=True the
behaviour is the same as when "as_dict"=True with the keys
(field names) guaranteed to be in the same order as returned
by the select name executed on the database.
fields: list of DAL Fields that match the fields returned from the
DB. The Field objects should be part of one or more Table
objects defined on the DAL object. The "fields" list can include
one or more DAL Table objects in addition to or instead of
including Field objects, or it can be just a single table
(not in a list). In that case, the Field objects will be
extracted from the table(s).
Note:
if either `fields` or `colnames` is provided, the results
will be converted to a DAL `Rows` object using the
`db._adapter.parse()` method
colnames: list of field names in tablename.fieldname format
Note:
It is also possible to specify both "fields" and the associated
"colnames". In that case, "fields" can also include DAL Expression
objects in addition to Field objects. For Field objects in "fields",
the associated "colnames" must still be in tablename.fieldname
format. For Expression objects in "fields", the associated
"colnames" can be any arbitrary labels.
DAL Table objects referred to by "fields" or "colnames" can be dummy
tables and do not have to represent any real tables in the database.
Also, note that the "fields" and "colnames" must be in the
same order as the fields in the results cursor returned from the DB.
"""
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor,'description'):
raise RuntimeError("database does not support executesql(...,as_dict=True)")
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = adapter.cursor.description
# reduce the column info down to just the field names
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError("Result set includes duplicate column names. Specify unique column names using the 'colnames' argument")
#: avoid bytes strings in columns names (py3)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
# will hold our finished resultset in a list
data = adapter._fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields,row)) for row in data]
try:
data = adapter._fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = ['%s.%s' % (f.tablename, f.name)
for f in extracted_fields]
data = adapter.parse(
data, fields=extracted_fields, colnames=colnames)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [field for field in table._referenced_by
if not field.table==thistable]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get('max_fetch_rows,',500))
write_colnames = kwargs['write_colnames'] = \
kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write('TABLE %s\r\n' % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs['write_colnames'] = write_colnames
for k in range(0,nrows,step):
self(query).select(limitby=(k,k+step)).export_to_csv_file(
ofile, *args, **kwargs)
kwargs['write_colnames'] = False
ofile.write('\r\n\r\n')
ofile.write('END')
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
unique='uuid', map_tablenames=None,
ignore_missing_tables=False,
*args, **kwargs):
#if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == 'END':
return
elif not line.startswith('TABLE ') or \
not line[6:] in self.tables:
raise SyntaxError('invalid file format')
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename,tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset,
*args, **kwargs)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
return DAL('<zombie>', db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
| ShySec/scrimmage-scoreboard | web2py/gluon/packages/dal/pydal/base.py | Python | gpl-2.0 | 48,012 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import connector
from buildbot.data import root
from buildbot.test.util import endpoint
class RootEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = root.RootEndpoint
resourceTypeClass = root.Root
def setUp(self):
self.setUpEndpoint()
self.master.data.rootLinks = [
{'name': u'abc'},
]
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get(self):
rootlinks = yield self.callGet(('',))
[self.validateData(root) for root in rootlinks]
self.assertEqual(rootlinks, [
{'name': u'abc'},
])
class SpecEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = root.SpecEndpoint
resourceTypeClass = root.Spec
def setUp(self):
self.setUpEndpoint()
# replace fakeConnector with real DataConnector
self.master.data.disownServiceParent()
self.master.data = connector.DataConnector()
self.master.data.setServiceParent(self.master)
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get(self):
specs = yield self.callGet(('application.spec',))
[self.validateData(s) for s in specs]
for s in specs:
# only test an endpoint that is reasonably stable
if s['path'] != "master":
continue
self.assertEqual(s,
{'path': 'master',
'type': 'master',
'type_spec': {'fields': [{'name': 'active',
'type': 'boolean',
'type_spec': {'name': 'boolean'}},
{'name': 'masterid',
'type': 'integer',
'type_spec': {'name': 'integer'}},
{'name': 'link',
'type': 'link',
'type_spec': {'name': 'link'}},
{'name': 'name',
'type': 'string',
'type_spec': {'name': 'string'}},
{'name': 'last_active',
'type': 'datetime',
'type_spec': {'name': 'datetime'}}],
'type': 'master'},
'plural': 'masters'})
| seankelly/buildbot | master/buildbot/test/unit/test_data_root.py | Python | gpl-2.0 | 3,723 |
# goto_assignments command tests are different in syntax
definition = 3
#! 0 ['a = definition']
a = definition
#! []
b
#! ['a = definition']
a
b = a
c = b
#! ['c = b']
c
cd = 1
#! 1 ['cd = c']
cd = c
#! 0 ['cd = e']
cd = e
#! ['module math']
import math
#! ['import math']
math
#! ['import math']
b = math
#! ['b = math']
b
class C(object):
def b(self):
#! ['b = math']
b
#! ['def b']
self.b
#! 14 ['def b']
self.b()
#! 11 ['self']
self.b
return 1
#! ['def b']
b
#! ['b = math']
b
#! ['def b']
C.b
#! ['def b']
C().b
#! 0 ['class C']
C().b
#! 0 ['class C']
C().b
D = C
#! ['def b']
D.b
#! ['def b']
D().b
#! 0 ['D = C']
D().b
#! 0 ['D = C']
D().b
def c():
return ''
#! ['def c']
c
#! 0 ['def c']
c()
class ClassVar():
x = 3
#! ['x = 3']
ClassVar.x
#! ['x = 3']
ClassVar().x
# before assignments
#! 10 ['x = 3']
ClassVar.x = ''
#! 12 ['x = 3']
ClassVar().x = ''
# Recurring use of the same var name, github #315
def f(t=None):
#! 9 ['t = None']
t = t or 1
# -----------------
# imports
# -----------------
#! ['module import_tree']
import import_tree
#! ["a = ''"]
import_tree.a
#! ['module mod1']
import import_tree.mod1
#! ['a = 1']
import_tree.mod1.a
#! ['module pkg']
import import_tree.pkg
#! ['a = list']
import_tree.pkg.a
#! ['module mod1']
import import_tree.pkg.mod1
#! ['a = 1.0']
import_tree.pkg.mod1.a
#! ["a = ''"]
import_tree.a
#! ['module mod1']
from import_tree.pkg import mod1
#! ['a = 1.0']
mod1.a
#! ['module mod1']
from import_tree import mod1
#! ['a = 1']
mod1.a
#! ['a = 1.0']
from import_tree.pkg.mod1 import a
#! ['import os']
from .imports import os
#! ['some_variable = 1']
from . import some_variable
# -----------------
# anonymous classes
# -----------------
def func():
class A():
def b(self):
return 1
return A()
#! 8 ['def b']
func().b()
# -----------------
# on itself
# -----------------
#! 7 ['class ClassDef']
class ClassDef():
""" abc """
pass
# -----------------
# params
# -----------------
param = ClassDef
#! 8 ['param']
def ab1(param): pass
#! 9 ['param']
def ab2(param): pass
#! 11 ['param = ClassDef']
def ab3(a=param): pass
ab1(ClassDef);ab2(ClassDef);ab3(ClassDef)
# -----------------
# for loops
# -----------------
for i in range(1):
#! ['for i in range(1): i']
i
for key, value in [(1,2)]:
#! ['for key,value in [(1, 2)]: key']
key
for i in []:
#! ['for i in []: i']
i
| Eddy0402/Environment | vim/ycmd/third_party/jedi/test/completion/goto.py | Python | gpl-3.0 | 2,526 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fcntl
import os
import pipes
import pty
import select
import subprocess
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import unfrackpath, makedirs_safe
from ansible.utils.unicode import to_bytes, to_unicode, to_str
from ansible.compat.six import text_type, binary_type
# The Ansible CLI entry point injects a shared ``display`` instance into
# __main__; fall back to a fresh Display when running outside that entry
# point (e.g. unit tests or API use).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
# Tri-state module-level cache for Connection._sshpass_available():
# None = not probed yet, True/False = probe result for this process.
SSHPASS_AVAILABLE = None
class Connection(ConnectionBase):
    ''' ssh based connections '''
    transport = 'ssh'
    has_pipelining = True
    # 'runas' is a Windows-only escalation method and cannot be driven
    # over an ssh transport.
    become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        # Remote address as resolved by the play context (inventory or
        # command-line overrides).
        self.host = self._play_context.remote_addr
    # The connection is created by running ssh/scp/sftp from the exec_command,
    # put_file, and fetch_file methods, so we don't need to do any connection
    # management here.
    def _connect(self):
        # No persistent connection object is held; each operation spawns
        # its own ssh/scp/sftp child process.
        return self
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
# distutils isn't always available; shutils.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
but for now we do it simple way.
'''
controlpersist = False
controlpath = False
for arg in command:
if 'controlpersist' in arg.lower():
controlpersist = True
elif 'controlpath' in arg.lower():
controlpath = True
return controlpersist, controlpath
def _add_args(self, explanation, args):
"""
Adds the given args to self._command and displays a caller-supplied
explanation of why they were added.
"""
self._command += args
display.vvvvv('SSH: ' + explanation + ': (%s)' % ')('.join(args), host=self._play_context.remote_addr)
    def _build_command(self, binary, *other_args):
        '''
        Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
        a command line as an array that can be passed to subprocess.Popen.

        Side effects: stores the argv in self._command, may open
        self.sshpass_pipe (password auth; the write end is filled in by
        _run()) and may set self._persistent when ControlPersist is enabled.
        '''
        self._command = []
        ## First, the command name.
        # If we want to use password authentication, we have to set up a pipe to
        # write the password to sshpass.
        if self._play_context.password:
            if not self._sshpass_available():
                raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
            # sshpass -d<fd> reads the password from the inherited read end
            # of this pipe.
            self.sshpass_pipe = os.pipe()
            self._command += ['sshpass', '-d{0}'.format(self.sshpass_pipe[0])]
        self._command += [binary]
        ## Next, additional arguments based on the configuration.
        # sftp batch mode allows us to correctly catch failed transfers, but can
        # be disabled if the client side doesn't support the option. However,
        # sftp batch mode does not prompt for passwords so it must be disabled
        # if not using controlpersist and using sshpass
        if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
            if self._play_context.password:
                self._add_args('disable batch mode for sshpass', ['-o', 'BatchMode=no'])
            self._command += ['-b', '-']
        if self._play_context.verbosity > 3:
            self._command += ['-vvv']
        elif binary == 'ssh':
            # Older versions of ssh (e.g. in RHEL 6) don't accept sftp -q.
            self._command += ['-q']
        # Next, we add [ssh_connection]ssh_args from ansible.cfg.
        if self._play_context.ssh_args:
            args = self._split_ssh_args(self._play_context.ssh_args)
            self._add_args("ansible.cfg set ssh_args", args)
        # Now we add various arguments controlled by configuration file settings
        # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
        # a combination thereof.
        if not C.HOST_KEY_CHECKING:
            self._add_args(
                "ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled",
                ("-o", "StrictHostKeyChecking=no")
            )
        if self._play_context.port is not None:
            self._add_args(
                "ANSIBLE_REMOTE_PORT/remote_port/ansible_port set",
                ("-o", "Port={0}".format(self._play_context.port))
            )
        key = self._play_context.private_key_file
        if key:
            self._add_args(
                "ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set",
                ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(key)))
            )
        # Without a password we must make sure ssh never falls back to an
        # interactive password prompt (it would hang the play).
        if not self._play_context.password:
            self._add_args(
                "ansible_password/ansible_ssh_pass not set", (
                    "-o", "KbdInteractiveAuthentication=no",
                    "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
                    "-o", "PasswordAuthentication=no"
                )
            )
        user = self._play_context.remote_user
        if user:
            self._add_args(
                "ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set",
                ("-o", "User={0}".format(to_bytes(self._play_context.remote_user)))
            )
        self._add_args(
            "ANSIBLE_TIMEOUT/timeout set",
            ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
        )
        # Add in any common or binary-specific arguments from the PlayContext
        # (i.e. inventory or task settings or overrides on the command line).
        for opt in ['ssh_common_args', binary + '_extra_args']:
            attr = getattr(self._play_context, opt, None)
            if attr is not None:
                args = self._split_ssh_args(attr)
                self._add_args("PlayContext set %s" % opt, args)
        # Check if ControlPersist is enabled and add a ControlPath if one hasn't
        # already been set.
        controlpersist, controlpath = self._persistence_controls(self._command)
        if controlpersist:
            self._persistent = True
            if not controlpath:
                cpdir = unfrackpath('$HOME/.ansible/cp')
                # The directory must exist and be writable.
                makedirs_safe(cpdir, 0o700)
                if not os.access(cpdir, os.W_OK):
                    raise AnsibleError("Cannot write to ControlPath %s" % cpdir)
                args = ("-o", "ControlPath={0}".format(
                    to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir)))
                )
                self._add_args("found only ControlPersist; added ControlPath", args)
        ## Finally, we add any caller-supplied extras.
        if other_args:
            self._command += other_args
        return self._command
def _send_initial_data(self, fh, in_data):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug('Sending initial data')
try:
fh.write(in_data)
fh.close()
except (OSError, IOError):
raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
display.debug('Sent initial data (%d bytes)' % len(in_data))
    # Used by _run() to kill processes on failures
    @staticmethod
    def _terminate_process(p):
        """ Terminate a process, ignoring errors """
        try:
            p.terminate()
        except (OSError, IOError):
            # The process may already have exited; nothing more to do.
            pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for l in chunk.splitlines(True):
suppress_output = False
#display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
if self._play_context.prompt and self.check_password_prompt(l):
display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
self._flags['become_prompt'] = True
suppress_output = True
elif self._play_context.success_key and self.check_become_success(l):
display.debug("become_success: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.check_incorrect_password(l):
display.debug("become_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
self._flags['become_error'] = True
elif sudoable and self.check_missing_password(l):
display.debug("become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(l)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = ''
if output and not output[-1].endswith('\n'):
remainder = output[-1]
output = output[:-1]
return ''.join(output), remainder
    def _run(self, cmd, in_data, sudoable=True):
        '''
        Starts the command and communicates with it until it ends.

        *cmd* is the argv built by _build_command(); *in_data* is optional
        data (module source, sftp batch commands) piped to the child's
        stdin.  Drives the privilege-escalation prompt negotiation as a
        small state machine and returns (returncode, stdout, stderr).
        '''
        display_cmd = map(to_unicode, map(pipes.quote, cmd))
        display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
        # Start the given command. If we don't need to pipeline data, we can try
        # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
        # pipelining data, or can't create a pty, we fall back to using plain
        # old pipes.
        p = None
        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = list(map(to_bytes, cmd))
        if not in_data:
            try:
                # Make sure stdin is a proper pty to avoid tcgetattr errors
                master, slave = pty.openpty()
                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                # Unbuffered (0) file object over the pty master; the slave
                # end belongs to the child and is closed on our side.
                stdin = os.fdopen(master, 'w', 0)
                os.close(slave)
            except (OSError, IOError):
                p = None
        if not p:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin
        # If we are using SSH password authentication, write the password into
        # the pipe we opened in _build_command.
        if self._play_context.password:
            os.close(self.sshpass_pipe[0])
            try:
                os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password)))
            except OSError as e:
                # Ignore broken pipe errors if the sshpass process has exited.
                if e.errno != errno.EPIPE or p.poll() is None:
                    raise
            os.close(self.sshpass_pipe[1])
        ## SSH state machine
        #
        # Now we read and accumulate output from the running process until it
        # exits. Depending on the circumstances, we may also need to write an
        # escalation password and/or pipelined input to the process.
        states = [
            'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
        ]
        # Are we requesting privilege escalation? Right now, we may be invoked
        # to execute sftp/scp with sudoable=True, but we can request escalation
        # only when using ssh. Otherwise we can send initial data straightaway.
        state = states.index('ready_to_send')
        if b'ssh' in cmd:
            if self._play_context.prompt:
                # We're requesting escalation with a password, so we have to
                # wait for a password prompt.
                state = states.index('awaiting_prompt')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt))
            elif self._play_context.become and self._play_context.success_key:
                # We're requesting escalation without a password, so we have to
                # detect success/failure before sending any initial data.
                state = states.index('awaiting_escalation')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.success_key))
        # We store accumulated stdout and stderr output from the process here,
        # but strip any privilege escalation prompt/confirmation lines first.
        # Output is accumulated into tmp_*, complete lines are extracted into
        # an array, then checked and removed or copied to stdout or stderr. We
        # set any flags based on examining the output in self._flags.
        stdout = stderr = ''
        tmp_stdout = tmp_stderr = ''
        self._flags = dict(
            become_prompt=False, become_success=False,
            become_error=False, become_nopasswd_error=False
        )
        # select timeout should be longer than the connect timeout, otherwise
        # they will race each other when we can't connect, and the connect
        # timeout usually fails
        timeout = 2 + self._play_context.timeout
        rpipes = [p.stdout, p.stderr]
        # Non-blocking reads: select() tells us when data is available, and
        # read() must not stall waiting for more.
        for fd in rpipes:
            fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # If we can send initial data without waiting for anything, we do so
        # before we call select.
        if states[state] == 'ready_to_send' and in_data:
            self._send_initial_data(stdin, in_data)
            state += 1
        while True:
            rfd, wfd, efd = select.select(rpipes, [], [], timeout)
            # We pay attention to timeouts only while negotiating a prompt.
            if not rfd:
                if state <= states.index('awaiting_escalation'):
                    # If the process has already exited, then it's not really a
                    # timeout; we'll let the normal error handling deal with it.
                    if p.poll() is not None:
                        break
                    self._terminate_process(p)
                    raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, stdout))
            # Read whatever output is available on stdout and stderr, and stop
            # listening to the pipe if it's been closed.
            if p.stdout in rfd:
                chunk = p.stdout.read()
                if chunk == '':
                    rpipes.remove(p.stdout)
                tmp_stdout += chunk
                display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
            if p.stderr in rfd:
                chunk = p.stderr.read()
                if chunk == '':
                    rpipes.remove(p.stderr)
                tmp_stderr += chunk
                display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
            # We examine the output line-by-line until we have negotiated any
            # privilege escalation prompt and subsequent success/error message.
            # Afterwards, we can accumulate output without looking at it.
            if state < states.index('ready_to_send'):
                if tmp_stdout:
                    output, unprocessed = self._examine_output('stdout', states[state], tmp_stdout, sudoable)
                    stdout += output
                    tmp_stdout = unprocessed
                if tmp_stderr:
                    output, unprocessed = self._examine_output('stderr', states[state], tmp_stderr, sudoable)
                    stderr += output
                    tmp_stderr = unprocessed
            else:
                stdout += tmp_stdout
                stderr += tmp_stderr
                tmp_stdout = tmp_stderr = ''
            # If we see a privilege escalation prompt, we send the password.
            # (If we're expecting a prompt but the escalation succeeds, we
            # didn't need the password and can carry on regardless.)
            if states[state] == 'awaiting_prompt':
                if self._flags['become_prompt']:
                    display.debug('Sending become_pass in response to prompt')
                    stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
                    self._flags['become_prompt'] = False
                    state += 1
                elif self._flags['become_success']:
                    state += 1
            # We've requested escalation (with or without a password), now we
            # wait for an error message or a successful escalation.
            if states[state] == 'awaiting_escalation':
                if self._flags['become_success']:
                    display.debug('Escalation succeeded')
                    self._flags['become_success'] = False
                    state += 1
                elif self._flags['become_error']:
                    display.debug('Escalation failed')
                    self._terminate_process(p)
                    self._flags['become_error'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
                elif self._flags['become_nopasswd_error']:
                    display.debug('Escalation requires password')
                    self._terminate_process(p)
                    self._flags['become_nopasswd_error'] = False
                    raise AnsibleError('Missing %s password' % self._play_context.become_method)
                elif self._flags['become_prompt']:
                    # This shouldn't happen, because we should see the "Sorry,
                    # try again" message first.
                    display.debug('Escalation prompt repeated')
                    self._terminate_process(p)
                    self._flags['become_prompt'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
            # Once we're sure that the privilege escalation prompt, if any, has
            # been dealt with, we can send any initial data and start waiting
            # for output.
            if states[state] == 'ready_to_send':
                if in_data:
                    self._send_initial_data(stdin, in_data)
                state += 1
            # Now we're awaiting_exit: has the child process exited? If it has,
            # and we've read all available output from it, we're done.
            if p.poll() is not None:
                if not rpipes or not rfd:
                    break
                # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
                # first connection goes into the background and we never see EOF
                # on stderr. If we see EOF on stdout and the process has exited,
                # we're probably done. We call select again with a zero timeout,
                # just to make certain we don't miss anything that may have been
                # written to stderr between the time we called select() and when
                # we learned that the process had finished.
                if p.stdout not in rpipes:
                    timeout = 0
                    continue
            # If the process has not yet exited, but we've already read EOF from
            # its stdout and stderr (and thus removed both from rpipes), we can
            # just wait for it to exit.
            elif not rpipes:
                p.wait()
                break
            # Otherwise there may still be outstanding data to read.
        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()
        if C.HOST_KEY_CHECKING:
            # sshpass exits with 6 when the host key is unknown; it cannot
            # answer the interactive confirmation prompt.
            if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        return (p.returncode, stdout, stderr)
def _exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
if not in_data and sudoable:
args = ('ssh', '-tt', self.host, cmd)
else:
args = ('ssh', self.host, cmd)
cmd = self._build_command(*args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
return (returncode, stdout, stderr)
#
# Main public methods
#
def exec_command(self, *args, **kwargs):
"""
Wrapper around _exec_command to retry in the case of an ssh failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* remaining_tries is <2
* retries limit reached
"""
remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
cmd_summary = "%s..." % args[0]
for attempt in range(remaining_tries):
try:
return_tuple = self._exec_command(*args, **kwargs)
# 0 = success
# 1-254 = remote command return code
# 255 = failure from the ssh command itself
if return_tuple[0] != 255:
break
else:
raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
else:
msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, pipes.quote(out_path)))
in_data = None
else:
cmd = self._build_command('sftp', to_bytes(host))
in_data = u"put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(to_str(out_path), to_str(stdout), to_str(stderr)))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
cmd = self._build_command('scp', u'{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
in_data = None
else:
cmd = self._build_command('sftp', host)
in_data = u"get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
    def close(self):
        """Mark the connection closed.

        Nothing is torn down here: a ControlPersist master could be asked to
        stop, but that path is temporarily disabled (winrm currently forces
        connections to be closed after every task), so only the connected
        flag is cleared.
        """
        # If we have a persistent ssh connection (ControlPersist), we can ask it
        # to stop listening. Otherwise, there's nothing to do here.
        # TODO: reenable once winrm issues are fixed
        # temporarily disabled as we are forced to currently close connections after every task because of winrm
        # if self._connected and self._persistent:
        #     cmd = self._build_command('ssh', '-O', 'stop', self.host)
        #
        #     cmd = map(to_bytes, cmd)
        #     p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        #     stdout, stderr = p.communicate()
        self._connected = False
| filipenf/ansible | lib/ansible/plugins/connection/ssh.py | Python | gpl-3.0 | 29,029 |
# urllib2 work-alike interface
# ...from urllib2...
from urllib2 import \
URLError, \
HTTPError, \
BaseHandler, \
UnknownHandler, \
FTPHandler, \
CacheFTPHandler
# ...and from mechanize
from _auth import \
HTTPPasswordMgr, \
HTTPPasswordMgrWithDefaultRealm, \
AbstractBasicAuthHandler, \
AbstractDigestAuthHandler, \
HTTPProxyPasswordMgr, \
ProxyHandler, \
ProxyBasicAuthHandler, \
ProxyDigestAuthHandler, \
HTTPBasicAuthHandler, \
HTTPDigestAuthHandler, \
HTTPSClientCertMgr
from _debug import \
HTTPResponseDebugProcessor, \
HTTPRedirectDebugProcessor
from _file import \
FileHandler
# Disabled for now: the gzip processor below is not yet functional.
## from _gzip import \
## HTTPGzipProcessor
from _http import \
HTTPHandler, \
HTTPDefaultErrorHandler, \
HTTPRedirectHandler, \
HTTPEquivProcessor, \
HTTPCookieProcessor, \
HTTPRefererProcessor, \
HTTPRefreshProcessor, \
HTTPErrorProcessor, \
HTTPRobotRulesProcessor, \
RobotExclusionError
import httplib
# HTTPSHandler can only be offered when the underlying httplib was built
# with SSL support (exposed as the httplib.HTTPS class).
if hasattr(httplib, 'HTTPS'):
    from _http import HTTPSHandler
del httplib
from _opener import OpenerDirector, \
SeekableResponseOpener, \
build_opener, install_opener, urlopen
from _request import \
Request
from _seek import \
SeekableProcessor
from _upgrade import \
HTTPRequestUpgradeProcessor, \
ResponseUpgradeProcessor
| deanhiller/databus | webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/_urllib2.py | Python | mpl-2.0 | 1,415 |
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import unslug
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import time
import werkzeug.urls
class WebsiteMembership(http.Controller):
    # Number of member references displayed per page of the index.
    _references_per_page = 20
    @http.route([
        '/members',
        '/members/page/<int:page>',
        '/members/association/<membership_id>',
        '/members/association/<membership_id>/page/<int:page>',
        '/members/country/<int:country_id>',
        '/members/country/<country_name>-<int:country_id>',
        '/members/country/<int:country_id>/page/<int:page>',
        '/members/country/<country_name>-<int:country_id>/page/<int:page>',
        '/members/association/<membership_id>/country/<country_name>-<int:country_id>',
        '/members/association/<membership_id>/country/<int:country_id>',
        '/members/association/<membership_id>/country/<country_name>-<int:country_id>/page/<int:page>',
        '/members/association/<membership_id>/country/<int:country_id>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
        """Render the paginated public member directory.

        Optional filters: *membership_id* (a product id, or the literal
        string 'free' for the free-members pseudo-membership), *country_id*,
        and a free-text 'search'/'name' query carried in *post*.
        """
        cr, uid, context = request.cr, request.uid, request.context
        product_obj = request.registry['product.product']
        country_obj = request.registry['res.country']
        membership_line_obj = request.registry['membership.membership_line']
        partner_obj = request.registry['res.partner']
        post_name = post.get('search') or post.get('name', '')
        current_country = None
        today = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
        # base domain for groupby / searches: only published partners with a
        # membership line that is paid and currently valid.
        base_line_domain = [
            ("partner.website_published", "=", True), ('state', '=', 'paid'),
            ('date_to', '>=', today), ('date_from', '<=', today)
        ]
        if membership_id and membership_id != 'free':
            membership_id = int(membership_id)
            base_line_domain.append(('membership_id', '=', membership_id))
            membership = product_obj.browse(cr, uid, membership_id, context=context)
        else:
            membership = None
        if post_name:
            base_line_domain += ['|', ('partner.name', 'ilike', post_name),
                                 ('partner.website_description', 'ilike', post_name)]
        # group by country, based on all customers (base domain)
        if membership_id != 'free':
            membership_line_ids = membership_line_obj.search(cr, SUPERUSER_ID, base_line_domain, context=context)
            country_domain = [('member_lines', 'in', membership_line_ids)]
            if not membership_id:
                # No specific membership selected: free members count too.
                country_domain = ['|', country_domain[0], ('membership_state', '=', 'free')]
        else:
            membership_line_ids = []
            country_domain = [('membership_state', '=', 'free')]
        if post_name:
            country_domain += ['|', ('name', 'ilike', post_name),
                               ('website_description', 'ilike', post_name)]
        countries = partner_obj.read_group(
            cr, SUPERUSER_ID, country_domain + [("website_published", "=", True)], ["id", "country_id"],
            groupby="country_id", orderby="country_id", context=request.context)
        countries_total = sum(country_dict['country_id_count'] for country_dict in countries)
        line_domain = list(base_line_domain)
        if country_id:
            line_domain.append(('partner.country_id', '=', country_id))
            current_country = country_obj.read(cr, uid, country_id, ['id', 'name'], context)
            # Keep the selected country visible in the filter list even when
            # it currently has no members.
            if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
                countries.append({
                    'country_id_count': 0,
                    'country_id': (country_id, current_country["name"])
                })
        countries = filter(lambda d:d['country_id'], countries)
        countries.sort(key=lambda d: d['country_id'][1])
        countries.insert(0, {
            'country_id_count': countries_total,
            'country_id': (0, _("All Countries"))
        })
        # format domain for group_by and memberships
        membership_ids = product_obj.search(cr, uid, [('membership', '=', True)], order="website_sequence", context=context)
        memberships = product_obj.browse(cr, uid, membership_ids, context=context)
        # make sure we don't access lines with unpublished memberships
        line_domain.append(('membership_id', 'in', membership_ids))
        limit = self._references_per_page
        offset = limit * (page - 1)
        count_members = 0
        membership_line_ids = []
        # displayed non-free membership lines
        if membership_id != 'free':
            count_members = membership_line_obj.search_count(cr, SUPERUSER_ID, line_domain, context=context)
            if offset <= count_members:
                membership_line_ids = tuple(membership_line_obj.search(cr, SUPERUSER_ID, line_domain, offset, limit, context=context))
        membership_lines = membership_line_obj.browse(cr, uid, membership_line_ids, context=context)
        # TODO: Following line can be deleted in master. Kept for retrocompatibility.
        membership_lines = sorted(membership_lines, key=lambda x: x.membership_id.website_sequence)
        page_partner_ids = set(m.partner.id for m in membership_lines)
        google_map_partner_ids = []
        if request.env.ref('website_membership.opt_index_google_map').customize_show:
            membership_line_ids = membership_line_obj.search(cr, uid, line_domain, context=context)
            google_map_partner_ids = membership_line_obj.get_published_companies(cr, uid, membership_line_ids, limit=2000, context=context)
        search_domain = [('membership_state', '=', 'free'), ('website_published', '=', True)]
        if post_name:
            search_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
        if country_id:
            search_domain += [('country_id', '=', country_id)]
        free_partner_ids = partner_obj.search(cr, SUPERUSER_ID, search_domain, context=context)
        memberships_data = []
        for membership_record in memberships:
            memberships_data.append({'id': membership_record.id, 'name': membership_record.name})
        memberships_partner_ids = {}
        for line in membership_lines:
            memberships_partner_ids.setdefault(line.membership_id.id, []).append(line.partner.id)
        if free_partner_ids:
            memberships_data.append({'id': 'free', 'name': _('Free Members')})
            if not membership_id or membership_id == 'free':
                # Free members fill whatever part of the current page is left
                # after the paid membership lines.
                if count_members < offset + limit:
                    free_start = max(offset - count_members, 0)
                    free_end = max(offset + limit - count_members, 0)
                    memberships_partner_ids['free'] = free_partner_ids[free_start:free_end]
                    page_partner_ids |= set(memberships_partner_ids['free'])
                    google_map_partner_ids += free_partner_ids[:2000-len(google_map_partner_ids)]
                count_members += len(free_partner_ids)
        google_map_partner_ids = ",".join(map(str, google_map_partner_ids))
        google_maps_api_key = request.env['ir.config_parameter'].sudo().get_param('google_maps_api_key')
        partners = { p.id: p for p in partner_obj.browse(request.cr, SUPERUSER_ID, list(page_partner_ids), request.context)}
        base_url = '/members%s%s' % ('/association/%s' % membership_id if membership_id else '',
                                     '/country/%s' % country_id if country_id else '')
        # request pager for lines
        pager = request.website.pager(url=base_url, total=count_members, page=page, step=limit, scope=7, url_args=post)
        values = {
            'partners': partners,
            'membership_lines': membership_lines, # TODO: This line can be deleted in master. Kept for retrocompatibility.
            'memberships': memberships, # TODO: This line too.
            'membership': membership, # TODO: This line too.
            'memberships_data': memberships_data,
            'memberships_partner_ids': memberships_partner_ids,
            'membership_id': membership_id,
            'countries': countries,
            'current_country': current_country and [current_country['id'], current_country['name']] or None,
            'current_country_id': current_country and current_country['id'] or 0,
            'google_map_partner_ids': google_map_partner_ids,
            'pager': pager,
            'post': post,
            'search': "?%s" % werkzeug.url_encode(post),
            'google_maps_api_key': google_maps_api_key,
        }
        return request.website.render("website_membership.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/members/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {}
values['main_object'] = values['partner'] = partner
return request.website.render("website_membership.partner", values)
return self.members(**post)
| be-cloud-be/horizon-addons | server/addons/website_membership/controllers/main.py | Python | agpl-3.0 | 9,678 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for Odoo
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_auth_admin_passkey
| acsone/server-tools | auth_admin_passkey/tests/__init__.py | Python | agpl-3.0 | 1,069 |
"""
Middleware to auto-expire inactive sessions after N seconds, which is configurable in
settings.
To enable this feature, set in a settings.py:
SESSION_INACTIVITY_TIMEOUT_IN_SECS = 300
This was taken from StackOverflow (http://stackoverflow.com/questions/14830669/how-to-expire-django-session-in-5minutes)
"""
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import auth
from django.utils.deprecation import MiddlewareMixin
LAST_TOUCH_KEYNAME = 'SessionInactivityTimeout:last_touch'


class SessionInactivityTimeout(MiddlewareMixin):
    """
    Middleware class to keep track of activity on a given session
    """

    def process_request(self, request):
        """
        Standard entry point for processing requests in Django
        """
        # Can't log out if not logged in.
        if not hasattr(request, "user") or not request.user.is_authenticated:
            return

        timeout_in_seconds = getattr(
            settings, "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", None)
        # Feature is disabled unless the setting is present and non-zero.
        if not timeout_in_seconds:
            return

        utc_now = datetime.utcnow()
        # Timestamp of the user's previous request; absent on the first
        # request after login.
        last_touch = request.session.get(LAST_TOUCH_KEYNAME)
        if last_touch and utc_now - last_touch > timedelta(seconds=timeout_in_seconds):
            # Inactive for too long: drop the marker and log the user out.
            del request.session[LAST_TOUCH_KEYNAME]
            auth.logout(request)
            return

        # Record this request as the latest activity on the session.
        request.session[LAST_TOUCH_KEYNAME] = utc_now
| eduNEXT/edx-platform | openedx/core/djangoapps/session_inactivity_timeout/middleware.py | Python | agpl-3.0 | 2,044 |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
try:
    from itertools import product
except ImportError:
    # Pure-Python fallback for very old Pythons lacking itertools.product.
    # NOTE(review): `map(...) * repeat` relies on Python 2's list-returning
    # map(); on Python 3 this branch is unreachable because the import above
    # always succeeds.
    def product(*args, **kwds):
        # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
        # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
        pools = map(tuple, args) * kwds.get('repeat', 1)
        result = [[]]
        for pool in pools:
            # Extend every partial tuple with every element of this pool.
            result = [x+[y] for x in result for y in pool]
        for prod in result:
            yield tuple(prod)
class SortingNetwork(list):
    """Sorting network class.

    From Wikipedia : A sorting network is an abstract mathematical model
    of a network of wires and comparator modules that is used to sort a
    sequence of numbers. Each comparator connects two wires and sort the
    values by outputting the smaller value to one wire, and a larger
    value to the other.

    The network itself is a list of levels; each level is a list of
    ``(wire1, wire2)`` comparator pairs that can all fire in parallel.
    """

    def __init__(self, dimension, connectors=()):
        """Create a network over *dimension* wires, optionally seeded with
        an iterable of ``(wire1, wire2)`` *connectors*.

        The default is an immutable empty tuple rather than the previous
        mutable ``[]`` default (shared-mutable-default pitfall); behavior
        is otherwise unchanged.
        """
        self.dimension = dimension
        for wire1, wire2 in connectors:
            self.addConnector(wire1, wire2)

    def addConnector(self, wire1, wire2):
        """Add a connector between wire1 and wire2 in the network."""
        if wire1 == wire2:
            # A comparator on a single wire is a no-op.
            return
        if wire1 > wire2:
            wire1, wire2 = wire2, wire1

        try:
            last_level = self[-1]
        except IndexError:
            # Empty network: create the first level with this connector.
            self.append([(wire1, wire2)])
            return

        for wires in last_level:
            if wires[1] >= wire1 and wires[0] <= wire2:
                # The new comparator's wire span overlaps a comparator in
                # the last level, so it must start a new level.
                self.append([(wire1, wire2)])
                return

        # No overlap: the comparator can run in parallel with the last level.
        last_level.append((wire1, wire2))

    def sort(self, values):
        """Sort the values in-place based on the connectors in the network."""
        for level in self:
            for wire1, wire2 in level:
                if values[wire1] > values[wire2]:
                    values[wire1], values[wire2] = values[wire2], values[wire1]

    def assess(self, cases=None):
        """Try to sort the **cases** using the network, return the number of
        misses. If **cases** is None, test all possible cases according to
        the network dimensionality.
        """
        if cases is None:
            # Default cases: every binary sequence of the network's dimension.
            cases = product(range(2), repeat=self.dimension)

        misses = 0
        # ordered[k] is the sorted binary sequence containing k ones.
        ordered = [[0]*(self.dimension-i) + [1]*i
                   for i in range(self.dimension+1)]
        for sequence in cases:
            sequence = list(sequence)
            self.sort(sequence)
            misses += (sequence != ordered[sum(sequence)])
        return misses

    def draw(self):
        """Return an ASCII representation of the network.

        NOTE(review): assumes the network has at least one level; with an
        empty network the row buffers are empty and indexing raises.
        """
        str_wires = [["-"]*7 * self.depth]
        str_wires[0][0] = "0"
        str_wires[0][1] = " o"
        str_spaces = []

        for i in range(1, self.dimension):
            str_wires.append(["-"]*7 * self.depth)
            str_spaces.append([" "]*7 * self.depth)
            str_wires[i][0] = str(i)
            str_wires[i][1] = " o"

        for index, level in enumerate(self):
            for wire1, wire2 in level:
                # Mark the comparator endpoints and the vertical link.
                str_wires[wire1][(index+1)*6] = "x"
                str_wires[wire2][(index+1)*6] = "x"
                for i in range(wire1, wire2):
                    str_spaces[i][(index+1)*6+1] = "|"
                for i in range(wire1+1, wire2):
                    str_wires[i][(index+1)*6] = "|"

        network_draw = "".join(str_wires[0])
        for line, space in zip(str_wires[1:], str_spaces):
            network_draw += "\n"
            network_draw += "".join(space)
            network_draw += "\n"
            network_draw += "".join(line)
        return network_draw

    @property
    def depth(self):
        """Return the number of parallel steps that it takes to sort any input.
        """
        return len(self)

    @property
    def length(self):
        """Return the number of comparison-swap used."""
        return sum(len(level) for level in self)
| IGITUGraz/scoop | examples/dependency/sortingnetwork.py | Python | lgpl-3.0 | 4,724 |
# 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
    """
    Return the coefficients (a, b, c) of the 2D line a*x + b*y + c = 0
    passing through points *p0* and *p1* (only x/y components are used).
    """
    x0, y0 = p0[:2]
    x1, y1 = p1[:2]

    if x0 == x1:
        # Vertical line: -x + x1 = 0
        a = -1
        b = 0
        c = x1
    elif y0 == y1:
        # Horizontal line: y - y1 = 0
        a = 0
        b = 1
        c = -y1
    else:
        # General case.  The previous implementation used b = (x0 - x1),
        # which does not satisfy a*x + b*y + c = 0 at the two input points;
        # the correct coefficient is b = (x1 - x0).
        a = (y0 - y1)
        b = (x1 - x0)
        c = (x0 * y1 - x1 * y0)
    return a, b, c
def line2d_dist(l, p):
    """
    Return the perpendicular distance from point *p* to the line *l*,
    where *l* is the coefficient tuple ``(a, b, c)`` of a*x + b*y + c = 0.
    """
    a, b, c = l
    px, py = p
    # |a*x + b*y + c| / sqrt(a^2 + b^2): standard point-line distance.
    return abs(a * px + b * py + c) / np.sqrt(a ** 2 + b ** 2)
def line2d_seg_dist(p1, p2, p0):
    """distance(s) from the segment p1-p2 to point(s) p0

    p0[0] = x(s), p0[1] = y(s); the nearest point on the infinite line is
    p1 + u*(p2-p1), and u is clipped to [0, 1] so the result is measured
    to the segment, not the line.
    """
    seg_x = p2[0] - p1[0]
    seg_y = p2[1] - p1[1]
    rel_x = np.asarray(p0[0]) - p1[0]
    rel_y = np.asarray(p0[1]) - p1[1]

    u = (rel_x * seg_x + rel_y * seg_y) / float(abs(seg_x ** 2 + seg_y ** 2))
    u = np.clip(u, 0, 1)
    return np.sqrt((rel_x - u * seg_x) ** 2 + (rel_y - u * seg_y) ** 2)
def test_lines_dists():
    """Visual check of line2d_seg_dist: plot a segment, then draw circles
    whose radii are the computed point-to-segment distances."""
    import pylab
    ax = pylab.gca()

    xs, ys = (0, 30), (20, 150)
    pylab.plot(xs, ys)
    points = list(zip(xs, ys))
    p0, p1 = points

    xs, ys = (0, 0, 20, 30), (100, 150, 30, 200)
    pylab.scatter(xs, ys)

    # First call exercises the scalar path (result discarded); second call
    # computes the vectorized distances for all sample points at once.
    dist = line2d_seg_dist(p0, p1, (xs[0], ys[0]))
    dist = line2d_seg_dist(p0, p1, np.array((xs, ys)))
    for x, y, d in zip(xs, ys, dist):
        c = Circle((x, y), d, fill=0)
        ax.add_patch(c)

    pylab.xlim(-200, 200)
    pylab.ylim(-200, 200)
    pylab.show()
def mod(v):
    """Return the Euclidean length of the 3d vector *v*."""
    x, y, z = v[0], v[1], v[2]
    return np.sqrt(x * x + y * y + z * z)
def world_transformation(xmin, xmax,
                         ymin, ymax,
                         zmin, zmax):
    """Return the 4x4 homogeneous matrix mapping the axis-aligned box
    [xmin, xmax] x [ymin, ymax] x [zmin, zmax] onto the unit cube."""
    dx = xmax - xmin
    dy = ymax - ymin
    dz = zmax - zmin
    # Scale each axis by 1/extent and translate its minimum to 0.
    return np.array([[1.0 / dx, 0,        0,        -xmin / dx],
                     [0,        1.0 / dy, 0,        -ymin / dy],
                     [0,        0,        1.0 / dz, -zmin / dz],
                     [0,        0,        0,        1.0]])
def test_world():
    """Smoke test: print the world transformation for a sample box."""
    M = world_transformation(100, 120, -100, 100, 0.1, 0.2)
    print(M)
def view_transformation(E, R, V):
    """Return the 4x4 view matrix for an eye at *E* looking toward *R*
    with approximate up vector *V*."""
    n = (E - R)
    n = n / mod(n)           # normalized view direction
    u = np.cross(V, n)
    u = u / mod(u)           # camera "right" axis
    v = np.cross(n, u)       # camera "up" axis

    # Rotation into the (u, v, n) camera basis.
    Mr = [[u[0], u[1], u[2], 0],
          [v[0], v[1], v[2], 0],
          [n[0], n[1], n[2], 0],
          [0,    0,    0,    1],
          ]
    # Translation moving the eye to the origin.
    Mt = [[1, 0, 0, -E[0]],
          [0, 1, 0, -E[1]],
          [0, 0, 1, -E[2]],
          [0, 0, 0, 1]]

    return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
    """Return the 4x4 perspective projection matrix for the given
    front/back clipping depths."""
    depth_span = zfront - zback
    a = (zfront + zback) / depth_span
    b = -2 * (zfront * zback) / depth_span
    return np.array([[1, 0, 0,  0],
                     [0, 1, 0,  0],
                     [0, 0, a,  b],
                     [0, 0, -1, 0]
                     ])
def proj_transform_vec(vec, M):
    """Apply projection matrix *M* to homogeneous vector(s) *vec* and
    return the perspective-divided (x, y, z) components."""
    vecw = np.dot(M, vec)
    w = vecw[3]
    # Perspective divide; clipping is handled by the *_clip variant.
    return vecw[0] / w, vecw[1] / w, vecw[2] / w
def proj_transform_vec_clip(vec, M):
    """Apply projection matrix *M* to homogeneous vector(s) *vec*.

    Returns ``(txs, tys, tzs, tis)`` where *tis* flags entries whose
    pre-divide x/y coordinates lie inside the [0, 1] clip region.
    """
    vecw = np.dot(M, vec)
    w = vecw[3]
    txs, tys, tzs = vecw[0] / w, vecw[1] / w, vecw[2] / w
    # Inside-clip mask on the pre-divide coordinates.
    tis = (vecw[0] >= 0) * (vecw[0] <= 1) * (vecw[1] >= 0) * (vecw[1] <= 1)
    # np.sometrue was deprecated and removed from modern NumPy; np.any is
    # the documented equivalent.  The mask replacement below is preserved
    # from the original implementation.
    if np.any(tis):
        tis = vecw[1] < 1
    return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
    """Map projected coordinates back through the inverse of projection
    matrix *M*."""
    vec = vec_pad_ones(xs, ys, zs)
    vecr = np.dot(linalg.inv(M), vec)
    try:
        vecr = vecr / vecr[3]
    except OverflowError:
        # Degenerate w component: fall back to the unnormalized result.
        pass
    return vecr[0], vecr[1], vecr[2]
def vec_pad_ones(xs, ys, zs):
    """Stack xs, ys, zs into homogeneous coordinates with w == 1,
    accepting ndarrays, plain sequences, or scalars."""
    try:
        try:
            # ndarray inputs: match their shape.
            vec = np.array([xs, ys, zs, np.ones(xs.shape)])
        except (AttributeError, TypeError):
            # Plain sequences: use their length.
            vec = np.array([xs, ys, zs, np.ones(len(xs))])
    except TypeError:
        # Scalars.
        vec = np.array([xs, ys, zs, 1])
    return vec
def proj_transform(xs, ys, zs, M):
    """
    Transform the points by the projection matrix
    """
    return proj_transform_vec(vec_pad_ones(xs, ys, zs), M)
def proj_transform_clip(xs, ys, zs, M):
    """
    Transform the points by the projection matrix
    and return the clipping result

    returns txs, tys, tzs, tis
    """
    return proj_transform_vec_clip(vec_pad_ones(xs, ys, zs), M)
transform = proj_transform  # Alias for proj_transform.
def proj_points(points, M):
    """Project *points* through *M* and return a list of (x, y, z) tuples."""
    txs, tys, tzs = proj_trans_points(points, M)
    return list(zip(txs, tys, tzs))
def proj_trans_points(points, M):
    """Project an iterable of (x, y, z) *points* through matrix *M*."""
    xs, ys, zs = zip(*points)
    return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
    """Project *points* through *M*, also returning the clip flags."""
    xs, ys, zs = zip(*points)
    return proj_transform_clip(xs, ys, zs, M)
def test_proj_draw_axes(M, s=1):
    """Draw the projected x/y/z axes of length *s* under matrix *M*
    (interactive pylab helper)."""
    import pylab
    # Origin plus one point along each axis, projected through M.
    xs, ys, zs = [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, s]
    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    o, ax, ay, az = (txs[0], tys[0]), (txs[1], tys[1]), \
        (txs[2], tys[2]), (txs[3], tys[3])
    lines = [(o, ax), (o, ay), (o, az)]

    ax = pylab.gca()
    linec = LineCollection(lines)
    ax.add_collection(linec)
    for x, y, t in zip(txs, tys, ['o', 'x', 'y', 'z']):
        pylab.text(x, y, t)
def test_proj_make_M(E=None):
    """Build a sample projection matrix; *E* optionally overrides the
    default eye position."""
    # `E = E or default` raises ValueError for multi-element ndarrays
    # (ambiguous truth value); test identity against None explicitly.
    if E is None:
        E = np.array([1, -1, 2]) * 1000  # eye point
    #E = np.array([20,10,20])
    R = np.array([1, 1, 1]) * 100        # view reference point
    V = np.array([0, 0, 1])              # up vector
    viewM = view_transformation(E, R, V)
    perspM = persp_transformation(100, -100)
    M = np.dot(perspM, viewM)
    return M
def test_proj():
    """Project the corners of a cube, plot them with pylab, and exercise
    the inverse transform (interactive demo)."""
    import pylab
    M = test_proj_make_M()

    ts = ['%d' % i for i in [0,1,2,3,0,4,5,6,7,4]]
    # Closed paths around the bottom (z=0) and top (z=1) faces of a cube.
    xs, ys, zs = [0,1,1,0,0, 0,1,1,0,0], [0,0,1,1,0, 0,0,1,1,0], \
        [0,0,0,0,0, 1,1,1,1,1]
    xs, ys, zs = [np.array(v)*300 for v in (xs, ys, zs)]
    #
    test_proj_draw_axes(M, s=400)

    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    # Inverse projection of the result (values unused; exercises the path).
    ixs, iys, izs = inv_transform(txs, tys, tzs, M)

    pylab.scatter(txs, tys, c=tzs)
    pylab.plot(txs, tys, c='r')
    for x, y, t in zip(txs, tys, ts):
        pylab.text(x, y, t)

    pylab.xlim(-0.2, 0.2)
    pylab.ylim(-0.2, 0.2)

    pylab.show()
def rot_x(V, alpha):
    """Rotate homogeneous vector *V* by *alpha* radians about the x axis.

    Note the (3, 3) entry of the rotation matrix is 0 (as in the original
    implementation), so the w component of the result is always 0.
    """
    c, s = np.cos(alpha), np.sin(alpha)
    M1 = np.array([[1, 0, 0,  0],
                   [0, c, -s, 0],
                   [0, s, c,  0],
                   [0, 0, 0,  0]])
    return np.dot(M1, V)
def test_rot():
    """Print 30-degree x-axis rotations of two sample vectors."""
    for V in ([1, 0, 0, 1], [0, 1, 0, 1]):
        print(rot_x(V, np.pi / 6))
if __name__ == "__main__":
    # Run the interactive projection demo when executed as a script.
    test_proj()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/mplot3d/proj3d.py | Python | bsd-2-clause | 6,988 |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.statmech.rotation` module.
"""
import unittest
import math
import numpy
from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
import rmgpy.constants as constants
################################################################################
class TestLinearRotor(unittest.TestCase):
    """
    Contains unit tests of the LinearRotor class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Reference rotor: inertia in amu*angstrom^2, symmetry number 2,
        # classical treatment by default.
        self.inertia = 11.75
        self.symmetry = 2
        self.quantum = False
        self.mode = LinearRotor(
            inertia = (self.inertia,"amu*angstrom^2"),
            symmetry = self.symmetry,
            quantum = self.quantum,
        )

    def test_getRotationalConstant(self):
        """
        Test getting the LinearRotor.rotationalConstant property.
        """
        Bexp = 1.434692
        Bact = self.mode.rotationalConstant.value_si
        self.assertAlmostEqual(Bexp, Bact, 4)

    def test_setRotationalConstant(self):
        """
        Test setting the LinearRotor.rotationalConstant property.
        """
        # Doubling B should halve the moment of inertia.
        B = self.mode.rotationalConstant
        B.value_si *= 2
        self.mode.rotationalConstant = B
        Iexp = 0.5 * self.inertia
        Iact = self.mode.inertia.value_si * constants.Na * 1e23
        self.assertAlmostEqual(Iexp, Iact, 4)

    def test_getLevelEnergy(self):
        """
        Test the LinearRotor.getLevelEnergy() method.
        """
        # Convert B from cm^-1 to J/mol via h*c*100*Na; levels are B*J*(J+1).
        B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100.
        B *= constants.Na
        for J in range(0, 100):
            Eexp = B * J * (J + 1)
            Eact = self.mode.getLevelEnergy(J)
            if J == 0:
                self.assertEqual(Eact, 0)
            else:
                self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp)

    def test_getLevelDegeneracy(self):
        """
        Test the LinearRotor.getLevelDegeneracy() method.
        """
        for J in range(0, 100):
            gexp = 2 * J + 1
            gact = self.mode.getLevelDegeneracy(J)
            self.assertEqual(gexp, gact)

    def test_getPartitionFunction_classical(self):
        """
        Test the LinearRotor.getPartitionFunction() method for a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([72.6691, 121.115, 242.230, 363.346, 484.461])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getPartitionFunction_quantum(self):
        """
        Test the LinearRotor.getPartitionFunction() method for a quantum
        rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([72.8360, 121.282, 242.391, 363.512, 484.627])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getHeatCapacity_classical(self):
        """
        Test the LinearRotor.getHeatCapacity() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getHeatCapacity_quantum(self):
        """
        Test the LinearRotor.getHeatCapacity() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getEnthalpy_classical(self):
        """
        Test the LinearRotor.getEnthalpy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEnthalpy_quantum(self):
        """
        Test the LinearRotor.getEnthalpy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([0.997705, 0.998624, 0.999312, 0.999541, 0.999656]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEntropy_classical(self):
        """
        Test the LinearRotor.getEntropy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getEntropy_quantum(self):
        """
        Test the LinearRotor.getEntropy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getSumOfStates_classical(self):
        """
        Test the LinearRotor.getSumOfStates() method using a classical rotor.
        """
        # Consistency check: cumulative density of states should track the
        # sum of states.  Energies are in J/mol (1 cm^-1 ~= 11.96 J/mol).
        self.mode.quantum = False
        Elist = numpy.arange(0, 2000*11.96, 1.0*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        sumStates = self.mode.getSumOfStates(Elist)
        for n in range(1, len(Elist)):
            self.assertAlmostEqual(numpy.sum(densStates[0:n]) / sumStates[n], 1.0, 3)

    def test_getSumOfStates_quantum(self):
        """
        Test the LinearRotor.getSumOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Elist = numpy.arange(0, 4000.*11.96, 2.0*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        sumStates = self.mode.getSumOfStates(Elist)
        for n in range(1, len(Elist)):
            self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 3)

    def test_getDensityOfStates_classical(self):
        """
        Test the LinearRotor.getDensityOfStates() method using a classical
        rotor.
        """
        # Consistency check: Boltzmann-weighting the density of states
        # should recover the partition function.
        self.mode.quantum = False
        Tlist = numpy.array([300,400,500])
        Elist = numpy.arange(0, 4000.*11.96, 1.0*11.96)
        for T in Tlist:
            densStates = self.mode.getDensityOfStates(Elist)
            Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
            Qexp = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_getDensityOfStates_quantum(self):
        """
        Test the LinearRotor.getDensityOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,400,500])
        Elist = numpy.arange(0, 4000.*11.96, 2.0*11.96)
        for T in Tlist:
            densStates = self.mode.getDensityOfStates(Elist)
            Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
            Qexp = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_repr(self):
        """
        Test that a LinearRotor object can be reconstructed from its repr()
        output with no loss of information.
        """
        # NOTE(review): rebinding a local via exec() only works on Python 2;
        # on Python 3 `mode` would remain None.
        mode = None
        exec('mode = {0!r}'.format(self.mode))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)

    def test_pickle(self):
        """
        Test that a LinearRotor object can be pickled and unpickled with no
        loss of information.
        """
        # NOTE(review): cPickle is Python 2 only (pickle on Python 3).
        import cPickle
        mode = cPickle.loads(cPickle.dumps(self.mode,-1))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)
################################################################################
class TestNonlinearRotor(unittest.TestCase):
    """
    Contains unit tests of the NonlinearRotor class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Reference rotor: three principal moments of inertia in
        # amu*angstrom^2, symmetry number 4, classical treatment.
        self.inertia = numpy.array([3.415, 16.65, 20.07])
        self.symmetry = 4
        self.quantum = False
        self.mode = NonlinearRotor(
            inertia = (self.inertia,"amu*angstrom^2"),
            symmetry = self.symmetry,
            quantum = self.quantum,
        )

    def test_getRotationalConstant(self):
        """
        Test getting the NonlinearRotor.rotationalConstant property.
        """
        Bexp = numpy.array([4.93635, 1.0125, 0.839942])
        Bact = self.mode.rotationalConstant.value_si
        for B0, B in zip(Bexp, Bact):
            self.assertAlmostEqual(B0, B, 4)

    def test_setRotationalConstant(self):
        """
        Test setting the NonlinearRotor.rotationalConstant property.
        """
        # Doubling each B should halve each moment of inertia.
        B = self.mode.rotationalConstant
        B.value_si *= 2
        self.mode.rotationalConstant = B
        Iexp = 0.5 * self.inertia
        Iact = self.mode.inertia.value_si * constants.Na * 1e23
        for I0, I in zip(Iexp, Iact):
            self.assertAlmostEqual(I0, I, 4)

    def test_getPartitionFunction_classical(self):
        """
        Test the NonlinearRotor.getPartitionFunction() method for a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([651.162, 1401.08, 3962.84, 7280.21, 11208.6])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getHeatCapacity_classical(self):
        """
        Test the NonlinearRotor.getHeatCapacity() method using a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getEnthalpy_classical(self):
        """
        Test the NonlinearRotor.getEnthalpy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEntropy_classical(self):
        """
        Test the NonlinearRotor.getEntropy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([7.97876, 8.74500, 9.78472, 10.3929, 10.8244]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getSumOfStates_classical(self):
        """
        Test the NonlinearRotor.getSumOfStates() method using a classical rotor.
        """
        # Loose consistency check between cumulative density and sum of
        # states (energies in J/mol; 1 cm^-1 ~= 11.96 J/mol).
        self.mode.quantum = False
        Elist = numpy.arange(0, 1000*11.96, 1*11.96)
        sumStates = self.mode.getSumOfStates(Elist)
        densStates = self.mode.getDensityOfStates(Elist)
        for n in range(10, len(Elist)):
            self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n]))

    def test_getDensityOfStates_classical(self):
        """
        Test the NonlinearRotor.getDensityOfStates() method using a classical
        rotor.
        """
        # Boltzmann-weighting the density of states should recover the
        # partition function.
        self.mode.quantum = False
        Elist = numpy.arange(0, 1000*11.96, 1*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        T = 100
        Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
        Qexp = self.mode.getPartitionFunction(T)
        self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_repr(self):
        """
        Test that a NonlinearRotor object can be reconstructed from its
        repr() output with no loss of information.
        """
        # NOTE(review): rebinding a local via exec() only works on Python 2.
        mode = None
        exec('mode = {0!r}'.format(self.mode))
        self.assertEqual(self.mode.inertia.value.shape, mode.inertia.value.shape)
        for I0, I in zip(self.mode.inertia.value, mode.inertia.value):
            self.assertAlmostEqual(I0, I, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)

    def test_pickle(self):
        """
        Test that a NonlinearRotor object can be pickled and unpickled with
        no loss of information.
        """
        # NOTE(review): cPickle is Python 2 only (pickle on Python 3).
        import cPickle
        mode = cPickle.loads(cPickle.dumps(self.mode,-1))
        self.assertEqual(self.mode.inertia.value.shape, mode.inertia.value.shape)
        for I0, I in zip(self.mode.inertia.value, mode.inertia.value):
            self.assertAlmostEqual(I0, I, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)
################################################################################
class TestKRotor(unittest.TestCase):
    """
    Contains unit tests of the KRotor class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Reference rotor: inertia in amu*angstrom^2, symmetry number 2,
        # classical treatment by default.
        self.inertia = 11.75
        self.symmetry = 2
        self.quantum = False
        self.mode = KRotor(
            inertia = (self.inertia,"amu*angstrom^2"),
            symmetry = self.symmetry,
            quantum = self.quantum,
        )

    def test_getRotationalConstant(self):
        """
        Test getting the KRotor.rotationalConstant property.
        """
        Bexp = 1.434692
        Bact = self.mode.rotationalConstant.value_si
        self.assertAlmostEqual(Bexp, Bact, 4)

    def test_setRotationalConstant(self):
        """
        Test setting the KRotor.rotationalConstant property.
        """
        # Doubling B should halve the moment of inertia.
        B = self.mode.rotationalConstant
        B.value_si *= 2
        self.mode.rotationalConstant = B
        Iexp = 0.5 * self.inertia
        Iact = self.mode.inertia.value_si * constants.Na * 1e23
        self.assertAlmostEqual(Iexp, Iact, 4)

    def test_getLevelEnergy(self):
        """
        Test the KRotor.getLevelEnergy() method.
        """
        # Convert B from cm^-1 to J/mol via h*c*100*Na; K-rotor levels go
        # as B*J^2.
        B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100.
        B *= constants.Na
        for J in range(0, 100):
            Eexp = float(B * J * J)
            Eact = float(self.mode.getLevelEnergy(J))
            if J == 0:
                self.assertEqual(Eact, 0)
            else:
                self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp)

    def test_getLevelDegeneracy(self):
        """
        Test the KRotor.getLevelDegeneracy() method.
        """
        for J in range(0, 100):
            gexp = 1 if J == 0 else 2
            gact = self.mode.getLevelDegeneracy(J)
            self.assertEqual(gexp, gact, '{0} != {1}'.format(gact, gexp))

    def test_getPartitionFunction_classical(self):
        """
        Test the KRotor.getPartitionFunction() method for a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getPartitionFunction_quantum(self):
        """
        Test the KRotor.getPartitionFunction() method for a quantum
        rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getHeatCapacity_classical(self):
        """
        Test the KRotor.getHeatCapacity() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getHeatCapacity_quantum(self):
        """
        Test the KRotor.getHeatCapacity() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getEnthalpy_classical(self):
        """
        Test the KRotor.getEnthalpy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEnthalpy_quantum(self):
        """
        Test the KRotor.getEnthalpy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEntropy_classical(self):
        """
        Test the KRotor.getEntropy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getEntropy_quantum(self):
        """
        Test the KRotor.getEntropy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getSumOfStates_classical(self):
        """
        Test the KRotor.getSumOfStates() method using a classical rotor.
        """
        # Loose consistency check between cumulative density and sum of
        # states (energies in J/mol; 1 cm^-1 ~= 11.96 J/mol).
        self.mode.quantum = False
        Elist = numpy.arange(0, 1000*11.96, 1*11.96)
        sumStates = self.mode.getSumOfStates(Elist)
        densStates = self.mode.getDensityOfStates(Elist)
        for n in range(10, len(Elist)):
            self.assertTrue(0.75 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.3333, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n]))

    def test_getSumOfStates_quantum(self):
        """
        Test the KRotor.getSumOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Elist = numpy.arange(0, 1000*11.96, 1*11.96)
        sumStates = self.mode.getSumOfStates(Elist)
        densStates = self.mode.getDensityOfStates(Elist)
        for n in range(10, len(Elist)):
            self.assertTrue(0.8 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n]))

    def test_getDensityOfStates_classical(self):
        """
        Test the KRotor.getDensityOfStates() method using a classical
        rotor.
        """
        # Boltzmann-weighting the density of states should recover the
        # partition function.
        self.mode.quantum = False
        Elist = numpy.arange(0, 3000*11.96, 0.05*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        T = 500
        Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
        Qexp = self.mode.getPartitionFunction(T)
        self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_getDensityOfStates_quantum(self):
        """
        Test the KRotor.getDensityOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Elist = numpy.arange(0, 4000*11.96, 2*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        T = 500
        Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
        Qexp = self.mode.getPartitionFunction(T)
        self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_repr(self):
        """
        Test that a KRotor object can be reconstructed from its repr() output
        with no loss of information.
        """
        # NOTE(review): rebinding a local via exec() only works on Python 2.
        mode = None
        exec('mode = {0!r}'.format(self.mode))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)

    def test_pickle(self):
        """
        Test that a KRotor object can be pickled and unpickled with no loss
        of information.
        """
        # NOTE(review): cPickle is Python 2 only (pickle on Python 3).
        import cPickle
        mode = cPickle.loads(cPickle.dumps(self.mode,-1))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)
################################################################################
class TestSphericalTopRotor(unittest.TestCase):
    """
    Contains unit tests of the SphericalTopRotor class.

    All energy grids are built with a factor of 11.96 J/mol per grid unit
    (presumably the cm^-1 -> J/mol conversion -- confirm against
    rmgpy.constants). Expected values are hard-coded reference numbers.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Moment of inertia in amu*angstrom^2, with external symmetry number 2.
        self.inertia = 11.75
        self.symmetry = 2
        self.quantum = False
        self.mode = SphericalTopRotor(
            inertia = (self.inertia,"amu*angstrom^2"),
            symmetry = self.symmetry,
            quantum = self.quantum,
        )

    def test_getRotationalConstant(self):
        """
        Test getting the SphericalTopRotor.rotationalConstant property.
        """
        Bexp = 1.434692
        Bact = self.mode.rotationalConstant.value_si
        self.assertAlmostEqual(Bexp, Bact, 4)

    def test_setRotationalConstant(self):
        """
        Test setting the SphericalTopRotor.rotationalConstant property.
        """
        # Doubling B should halve the moment of inertia (B is inversely
        # proportional to I).
        B = self.mode.rotationalConstant
        B.value_si *= 2
        self.mode.rotationalConstant = B
        Iexp = 0.5 * self.inertia
        Iact = self.mode.inertia.value_si * constants.Na * 1e23
        self.assertAlmostEqual(Iexp, Iact, 4)

    def test_getLevelEnergy(self):
        """
        Test the SphericalTopRotor.getLevelEnergy() method.
        """
        # E(J) = B*J*(J+1) with B converted to J/mol.
        B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100.
        B *= constants.Na
        for J in range(0, 100):
            Eexp = B * J * (J + 1)
            Eact = self.mode.getLevelEnergy(J)
            if J == 0:
                self.assertEqual(Eact, 0)
            else:
                self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp)

    def test_getLevelDegeneracy(self):
        """
        Test the SphericalTopRotor.getLevelDegeneracy() method.
        """
        # Spherical-top degeneracy is (2J+1)^2.
        for J in range(0, 100):
            gexp = (2 * J + 1)**2
            gact = self.mode.getLevelDegeneracy(J)
            self.assertEqual(gexp, gact, '{0} != {1}'.format(gact, gexp))

    def test_getPartitionFunction_classical(self):
        """
        Test the SphericalTopRotor.getPartitionFunction() method for a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([1552.74, 3340.97, 9449.69, 17360.2, 26727.8])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getPartitionFunction_quantum(self):
        """
        Test the SphericalTopRotor.getPartitionFunction() method for a quantum
        rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Qexplist = numpy.array([1555.42, 3344.42, 9454.57, 17366.2, 26734.7])
        for T, Qexp in zip(Tlist, Qexplist):
            Qact = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)

    def test_getHeatCapacity_classical(self):
        """
        Test the SphericalTopRotor.getHeatCapacity() method using a classical rotor.
        """
        # Three rotational degrees of freedom -> Cv = 1.5 R at all T.
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getHeatCapacity_quantum(self):
        """
        Test the SphericalTopRotor.getHeatCapacity() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R
        for T, Cvexp in zip(Tlist, Cvexplist):
            Cvact = self.mode.getHeatCapacity(T)
            self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)

    def test_getEnthalpy_classical(self):
        """
        Test the SphericalTopRotor.getEnthalpy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Hexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEnthalpy_quantum(self):
        """
        Test the SphericalTopRotor.getEnthalpy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        # Quantum values approach the classical 1.5*R*T limit as T grows.
        Hexplist = numpy.array([1.49828, 1.49897, 1.49948, 1.49966, 1.49974]) * constants.R * Tlist
        for T, Hexp in zip(Tlist, Hexplist):
            Hact = self.mode.getEnthalpy(T)
            self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)

    def test_getEntropy_classical(self):
        """
        Test the SphericalTopRotor.getEntropy() method using a classical rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,500,1000,1500,2000])
        Sexplist = numpy.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getEntropy_quantum(self):
        """
        Test the SphericalTopRotor.getEntropy() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,500,1000,1500,2000])
        # Same reference values as the classical case to 6 significant figures.
        Sexplist = numpy.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R
        for T, Sexp in zip(Tlist, Sexplist):
            Sact = self.mode.getEntropy(T)
            self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)

    def test_getSumOfStates_classical(self):
        """
        Test the SphericalTopRotor.getSumOfStates() method using a classical rotor.
        """
        self.mode.quantum = False
        Elist = numpy.arange(0, 2000*11.96, 1.0*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        sumStates = self.mode.getSumOfStates(Elist)
        # Skip the lowest bins where the discretization error dominates.
        for n in range(20, len(Elist)):
            self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 1)

    def test_getSumOfStates_quantum(self):
        """
        Test the SphericalTopRotor.getSumOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Elist = numpy.arange(0, 2000*11.96, 1.0*11.96)
        densStates = self.mode.getDensityOfStates(Elist)
        sumStates = self.mode.getSumOfStates(Elist)
        # Quantum counting is exact, so a tighter tolerance applies from n=1.
        for n in range(1, len(Elist)):
            self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 3)

    def test_getDensityOfStates_classical(self):
        """
        Test the SphericalTopRotor.getDensityOfStates() method using a classical
        rotor.
        """
        self.mode.quantum = False
        Tlist = numpy.array([300,400,500])
        Elist = numpy.arange(0, 2000*11.96, 1.0*11.96)
        for T in Tlist:
            # The Boltzmann-weighted sum of the density of states must
            # reproduce the partition function at each T (1% tolerance).
            densStates = self.mode.getDensityOfStates(Elist)
            Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
            Qexp = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_getDensityOfStates_quantum(self):
        """
        Test the SphericalTopRotor.getDensityOfStates() method using a quantum rotor.
        """
        self.mode.quantum = True
        Tlist = numpy.array([300,400,500])
        Elist = numpy.arange(0, 4000*11.96, 2.0*11.96)
        for T in Tlist:
            densStates = self.mode.getDensityOfStates(Elist)
            Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
            Qexp = self.mode.getPartitionFunction(T)
            self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp)

    def test_repr(self):
        """
        Test that a SphericalTopRotor object can be reconstructed from its
        repr() output with no loss of information.
        """
        mode = None
        # Python 2 exec statement; would not rebind a local under Python 3.
        exec('mode = {0!r}'.format(self.mode))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)

    def test_pickle(self):
        """
        Test that a SphericalTopRotor object can be pickled and unpickled
        with no loss of information.
        """
        # cPickle is Python 2 only; protocol -1 selects the highest available.
        import cPickle
        mode = cPickle.loads(cPickle.dumps(self.mode,-1))
        self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6)
        self.assertEqual(self.mode.inertia.units, mode.inertia.units)
        self.assertEqual(self.mode.symmetry, mode.symmetry)
        self.assertEqual(self.mode.quantum, mode.quantum)
| faribas/RMG-Py | rmgpy/statmech/rotationTest.py | Python | mit | 34,218 |
## @file
# This file is used to define class objects of INF file [Ppis] section.
# It will consumed by InfParser.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfPpiObject
'''
from Library.ParserValidate import IsValidCVariableName
from Library.CommentParsing import ParseComment
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.Misc import Sdict
from Library import DataType as DT
import Logger.Log as Logger
from Logger import ToolError
from Logger import StringTable as ST
def ParsePpiComment(CommentsList, InfPpiItemObj):
    """
    Parse the comment lines attached to one PPI entry into
    InfPpiItemCommentContent objects and attach them to InfPpiItemObj.

    BlockFlag encodes how adjacent comment lines are being grouped:
      -1 : no pending state
       1 : one generic comment (usage and notify both UNDEFINED) pending
       2 : two or more generic comments pending (being combined)
       3 : pending generic text must be flushed as its own entry, followed
           by the current tagged comment as a second entry
       4 : the current comment (with any combined generic text) is complete
           and is flushed as one entry
    """
    PreNotify = None
    PreUsage = None
    PreHelpText = ''
    BlockFlag = -1
    CommentInsList = []
    Count = 0
    for CommentItem in CommentsList:
        Count = Count + 1
        # ParseComment yields (usage, notify, string, help-text) for one line.
        CommentItemUsage, \
        CommentItemNotify, \
        CommentItemString, \
        CommentItemHelpText = \
                ParseComment(CommentItem,
                             DT.ALL_USAGE_TOKENS,
                             DT.PPI_NOTIFY_TOKENS,
                             ['PPI'],
                             False)
        #
        # To avoid PyLint error
        #
        if CommentItemString:
            pass
        if CommentItemHelpText == None:
            CommentItemHelpText = ''
            # The final line of an all-generic comment block keeps a trailing
            # end-of-line marker so later joins stay well-formed.
            if Count == len(CommentsList) and CommentItemUsage == CommentItemNotify == DT.ITEM_UNDEFINED:
                CommentItemHelpText = DT.END_OF_LINE
        #
        # For the last comment item, settle BlockFlag so the pending entry
        # is flushed below (state 3 or 4).
        #
        if Count == len(CommentsList):
            if BlockFlag == 1 or BlockFlag == 2:
                if CommentItemUsage == CommentItemNotify == DT.ITEM_UNDEFINED:
                    BlockFlag = 4
                else:
                    BlockFlag = 3
            elif BlockFlag == -1:
                BlockFlag = 4
        #
        # Comment USAGE and NOTIFY information are "UNDEFINED"
        #
        if BlockFlag == -1 or BlockFlag == 1 or BlockFlag == 2:
            if CommentItemUsage == CommentItemNotify == DT.ITEM_UNDEFINED:
                if BlockFlag == -1:
                    BlockFlag = 1
                elif BlockFlag == 1:
                    BlockFlag = 2
            else:
                if BlockFlag == 1 or BlockFlag == 2:
                    BlockFlag = 3
                #
                # An item has Usage or Notify information and this is the
                # first time such information appears.
                #
                elif BlockFlag == -1:
                    BlockFlag = 4
        #
        # Combine two comment lines when both are generic comments.
        #
        if CommentItemUsage == CommentItemNotify == PreUsage == PreNotify == DT.ITEM_UNDEFINED:
            CommentItemHelpText = PreHelpText + DT.END_OF_LINE + CommentItemHelpText
            #
            # Store this information; the next line may still need combining.
            #
            PreHelpText = CommentItemHelpText
        if BlockFlag == 4:
            # Flush the completed comment as a single entry.
            CommentItemIns = InfPpiItemCommentContent()
            CommentItemIns.SetUsage(CommentItemUsage)
            CommentItemIns.SetNotify(CommentItemNotify)
            CommentItemIns.SetHelpStringItem(CommentItemHelpText)
            CommentInsList.append(CommentItemIns)
            BlockFlag = -1
            PreUsage = None
            PreNotify = None
            PreHelpText = ''
        elif BlockFlag == 3:
            #
            # Add previous help string as its own (untagged) entry first.
            #
            CommentItemIns = InfPpiItemCommentContent()
            CommentItemIns.SetUsage(DT.ITEM_UNDEFINED)
            CommentItemIns.SetNotify(DT.ITEM_UNDEFINED)
            if PreHelpText == '' or PreHelpText.endswith(DT.END_OF_LINE):
                PreHelpText += DT.END_OF_LINE
            CommentItemIns.SetHelpStringItem(PreHelpText)
            CommentInsList.append(CommentItemIns)
            #
            # Then add the current (tagged) help string.
            #
            CommentItemIns = InfPpiItemCommentContent()
            CommentItemIns.SetUsage(CommentItemUsage)
            CommentItemIns.SetNotify(CommentItemNotify)
            CommentItemIns.SetHelpStringItem(CommentItemHelpText)
            CommentInsList.append(CommentItemIns)
            BlockFlag = -1
            PreUsage = None
            PreNotify = None
            PreHelpText = ''
        else:
            # Still accumulating: remember this line for the next iteration.
            PreUsage = CommentItemUsage
            PreNotify = CommentItemNotify
            PreHelpText = CommentItemHelpText
    InfPpiItemObj.SetCommentList(CommentInsList)
    return InfPpiItemObj
class InfPpiItemCommentContent():
    """
    One usage/notify/help-text annotation parsed from a PPI comment block,
    e.g. "## SOMETIMES_CONSUMES ## HelpString".
    """
    def __init__(self):
        # Usage token attached to the PPI (e.g. SOMETIMES_CONSUMES).
        self.UsageItem = ''
        # Free-form help text following the usage token.
        self.HelpStringItem = ''
        # 'true' once a concrete notify token has been recorded; '' otherwise.
        self.Notify = ''
        self.CommentList = []

    def SetUsage(self, UsageItem):
        self.UsageItem = UsageItem

    def GetUsage(self):
        return self.UsageItem

    def SetNotify(self, Notify):
        # Only a defined notify token turns the flag on; it is never cleared.
        if Notify != DT.ITEM_UNDEFINED:
            self.Notify = 'true'

    def GetNotify(self):
        return self.Notify

    def SetHelpStringItem(self, HelpStringItem):
        self.HelpStringItem = HelpStringItem

    def GetHelpStringItem(self):
        return self.HelpStringItem
class InfPpiItem():
    """
    A single PPI entry from an INF [Ppis] section: its CName, supported
    architectures, parsed comments, and optional feature-flag expression.
    """
    def __init__(self):
        # CName of the PPI.
        self.Name = ''
        # Optional feature-flag expression ("CName | FFE").
        self.FeatureFlagExp = ''
        # Architectures of the sections the PPI appeared in.
        self.SupArchList = []
        # InfPpiItemCommentContent instances for this entry.
        self.CommentList = []

    def SetName(self, Name):
        self.Name = Name

    def GetName(self):
        return self.Name

    def SetFeatureFlagExp(self, FeatureFlagExp):
        self.FeatureFlagExp = FeatureFlagExp

    def GetFeatureFlagExp(self):
        return self.FeatureFlagExp

    def SetSupArchList(self, SupArchList):
        self.SupArchList = SupArchList

    def GetSupArchList(self):
        return self.SupArchList

    def SetCommentList(self, CommentList):
        self.CommentList = CommentList

    def GetCommentList(self):
        return self.CommentList
##
#
#
#
class InfPpiObject():
    """
    Container for the [Ppis] section content of an INF file.

    Entries are stored in self.Ppis, a sorted dict (Sdict) mapping an
    InfPpiItem key to the list of InfPpiItem objects recorded for it.
    Fixes over the previous revision: dict.has_key() (removed in Python 3)
    replaced with the ``in`` operator, ``== None`` comparisons replaced with
    identity tests, and loop variables renamed so they no longer shadow the
    outer ``Item`` loop variable or rebind the ``PpiList`` parameter while
    it is being iterated.
    """
    def __init__(self):
        self.Ppis = Sdict()
        #
        # Macro defined in this section should be only used in this section.
        #
        self.Macros = {}

    def SetPpi(self, PpiList, Arch = None):
        """
        Validate and store the PPI entries of one [Ppis] section.

        :param PpiList: list of (value, comments, current-line) tuples from
            the INF parser; value is [CName] or [CName, FeatureFlagExp].
        :param Arch: list of architectures the section applies to.
        :return: True on success; format errors abort via Logger.Error.
        """
        __SupArchList = []
        for ArchItem in Arch:
            #
            # Normalize an empty/None arch to COMMON.
            #
            if ArchItem == '' or ArchItem is None:
                ArchItem = 'COMMON'
            __SupArchList.append(ArchItem)

        for Item in PpiList:
            #
            # Unpack the (value, comments, line-info) tuple for this PPI.
            #
            CommentsList = None
            if len(Item) == 3:
                CommentsList = Item[1]
            CurrentLineOfItem = Item[2]
            Item = Item[0]
            InfPpiItemObj = InfPpiItem()
            if len(Item) >= 1 and len(Item) <= 2:
                #
                # Only CName contained
                #
                if not IsValidCVariableName(Item[0]):
                    Logger.Error("InfParser",
                                 ToolError.FORMAT_INVALID,
                                 ST.ERR_INF_PARSER_INVALID_CNAME%(Item[0]),
                                 File=CurrentLineOfItem[2],
                                 Line=CurrentLineOfItem[1],
                                 ExtraData=CurrentLineOfItem[0])
                if Item[0] != '':
                    InfPpiItemObj.SetName(Item[0])
                else:
                    Logger.Error("InfParser",
                                 ToolError.FORMAT_INVALID,
                                 ST.ERR_INF_PARSER_CNAME_MISSING,
                                 File=CurrentLineOfItem[2],
                                 Line=CurrentLineOfItem[1],
                                 ExtraData=CurrentLineOfItem[0])
                #
                # Have FeatureFlag information
                #
                if len(Item) == 2:
                    #
                    # <statements> ::= <CName> ["|" <FeatureFlagExpress>]
                    # Item[1] must be a non-empty expression.
                    #
                    if Item[1].strip() == '':
                        Logger.Error("InfParser",
                                     ToolError.FORMAT_INVALID,
                                     ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
                                     File=CurrentLineOfItem[2],
                                     Line=CurrentLineOfItem[1],
                                     ExtraData=CurrentLineOfItem[0])
                    #
                    # Validate the feature-flag expression syntax.
                    #
                    FeatureFlagRtv = IsValidFeatureFlagExp(Item[1].strip())
                    if not FeatureFlagRtv[0]:
                        Logger.Error("InfParser",
                                     ToolError.FORMAT_INVALID,
                                     ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID%(FeatureFlagRtv[1]),
                                     File=CurrentLineOfItem[2],
                                     Line=CurrentLineOfItem[1],
                                     ExtraData=CurrentLineOfItem[0])
                    InfPpiItemObj.SetFeatureFlagExp(Item[1])
            if len(Item) != 1 and len(Item) != 2:
                #
                # Invalid format of Ppi statement
                #
                Logger.Error("InfParser",
                             ToolError.FORMAT_INVALID,
                             ST.ERR_INF_PARSER_GUID_PPI_PROTOCOL_SECTION_CONTENT_ERROR,
                             File=CurrentLineOfItem[2],
                             Line=CurrentLineOfItem[1],
                             ExtraData=CurrentLineOfItem[0])

            #
            # Attach usage/help-text comments, or a single empty placeholder
            # when the entry carried no comments.
            #
            if CommentsList is not None and len(CommentsList) != 0:
                InfPpiItemObj = ParsePpiComment(CommentsList, InfPpiItemObj)
            else:
                CommentItemIns = InfPpiItemCommentContent()
                CommentItemIns.SetUsage(DT.ITEM_UNDEFINED)
                CommentItemIns.SetNotify(DT.ITEM_UNDEFINED)
                InfPpiItemObj.SetCommentList([CommentItemIns])
            InfPpiItemObj.SetSupArchList(__SupArchList)

            #
            # Determine PPI name duplicate. Follow below rule:
            #
            # A PPI must not be duplicated within a [Ppis] section.
            # A PPI may appear in multiple architectural [Ppis]
            # sections. A PPI listed in an architectural [Ppis]
            # section must not be listed in the common architectural
            # [Ppis] section.
            #
            # NOTE: This check will not report error now.
            #
            for ExistingItem in self.Ppis:
                if ExistingItem.GetName() == InfPpiItemObj.GetName():
                    ItemSupArchList = ExistingItem.GetSupArchList()
                    for ItemArch in ItemSupArchList:
                        for PpiItemObjArch in __SupArchList:
                            if ItemArch == PpiItemObjArch:
                                #
                                # ST.ERR_INF_PARSER_ITEM_DUPLICATE
                                #
                                pass
                            if ItemArch.upper() == 'COMMON' or PpiItemObjArch.upper() == 'COMMON':
                                #
                                # ST.ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
                                #
                                pass

            if InfPpiItemObj in self.Ppis:
                StoredPpis = self.Ppis[InfPpiItemObj]
                StoredPpis.append(InfPpiItemObj)
                self.Ppis[InfPpiItemObj] = StoredPpis
            else:
                self.Ppis[InfPpiItemObj] = [InfPpiItemObj]

        return True

    def GetPpi(self):
        """Return the sorted dict of stored PPI entries."""
        return self.Ppis
#!/usr/bin/env python
# encoding: utf-8
"""
Creates a new sound table from random chunks of a soundfile.
"""
from pyo import *
import random, os
s = Server(sr=44100, nchnls=2, buffersize=512, duplex=0).boot()
# Source soundfile; dur holds sndinfo()[1] (presumably its duration in
# seconds -- confirm against the pyo sndinfo documentation).
path = "../snds/baseballmajeur_m.aif"
dur = sndinfo(path)[1]
# Working table initially holds the first second of the file.
t = SndTable(path, start=0, stop=1)
# Short fade envelope used to mute the looper while the table is rebuilt.
amp = Fader(fadein=0.005, fadeout=0.005, dur=0, mul=0.4).play()
a = Looper(t, pitch=[1.,1.], dur=t.getDur(), xfade=5, mul=amp).out()
def addsnd():
    """Insert a random 0.1-0.3s chunk of the source file at a random
    position in the sound table, crossfaded at both edges."""
    start = random.uniform(0, dur*0.7)
    duration = random.uniform(.1, .3)
    # Keep the insert point away from the table edges.
    pos = random.uniform(0.05, t.getDur()-0.5)
    cross = random.uniform(0.04, duration/2)
    t.insert(path, pos=pos, crossfade=cross, start=start, stop=start+duration)
def delayed_generation():
    """Rebuild the table from a fresh random chunk, sprinkle in ten random
    inserts, then restart the looper and fade back in."""
    start = random.uniform(0, dur*0.7)
    duration = random.uniform(.1, .3)
    t.setSound(path, start=start, stop=start+duration)
    for i in range(10):
        addsnd()
    # Resync the looper with the new table length before restarting it.
    a.dur = t.getDur()
    a.reset()
    amp.play()
# Defer the table regeneration slightly so the 5 ms fade-out can finish first.
caller = CallAfter(function=delayed_generation, time=0.005).stop()
def gen():
    """Fade the looper down, then rebuild the table after a short delay."""
    amp.stop()
    caller.play()
gen()
s.gui(locals())
from collections import OrderedDict
from django.core.cache import cache
from django.conf import settings
import jingo
import jinja2
from bedrock.firefox.models import FirefoxOSFeedLink
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def android_builds(channel, builds=None):
    """
    Append Firefox-for-Android download info dicts for *channel* to *builds*.

    :param channel: release channel name ('alpha', 'beta', 'release', ...).
    :param builds: optional list to extend; a fresh list is used when falsy,
        so the default is never shared between calls.
    :return: the (possibly newly created) list of build dicts.
    """
    builds = builds or []
    # Alpha ships one APK per platform variation; other channels ship one APK.
    variations = OrderedDict([
        ('api-9', 'Gingerbread'),
        ('api-11', 'Honeycomb+ ARMv7+'),
        ('x86', 'x86'),
    ])
    if channel == 'alpha':
        # Renamed from 'type' to avoid shadowing the builtin.
        for build_type, arch_pretty in variations.iteritems():
            link = firefox_android.get_download_url('alpha', build_type)
            builds.append({'os': 'android',
                           'os_pretty': 'Android',
                           'os_arch_pretty': 'Android %s' % arch_pretty,
                           'arch': 'x86' if build_type == 'x86' else 'armv7up %s' % build_type,
                           'arch_pretty': arch_pretty,
                           'download_link': link})
    else:
        link = firefox_android.get_download_url(channel)
        builds.append({'os': 'android',
                       'os_pretty': 'Android',
                       'download_link': link})
    return builds
@jingo.register.function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', small=False, icon=True,
                     platform='all', dom_id=None, locale=None, simple=False,
                     force_direct=False, force_full_installer=False,
                     force_funnelcake=False, check_old_fx=False):
    """ Output a "download firefox" button.

    :param ctx: context from calling template.
    :param channel: name of channel: 'release', 'beta' or 'alpha'.
    :param small: Display the small button if True.
    :param icon: Display the Fx icon on the button if True.
    :param platform: Target platform: 'desktop', 'android' or 'all'.
    :param dom_id: Use this string as the id attr on the element.
    :param locale: The locale of the download. Default to locale of request.
    :param simple: Display button with text only if True. Will not display
        icon or privacy/what's new/systems & languages links. Can be used
        in conjunction with 'small'.
    :param force_direct: Force the download URL to be direct.
    :param force_full_installer: Force the installer download to not be
        the stub installer (for aurora).
    :param force_funnelcake: Force the download version for en-US Windows to be
        'latest', which bouncer will translate to the funnelcake build.
    :param check_old_fx: Checks to see if the user is on an old version of
        Firefox and, if true, changes the button text from 'Free Download'
        to 'Update your Firefox'. Must be used in conjunction with
        'simple' param being true.
    :return: The button html.
    """
    show_desktop = platform in ['all', 'desktop']
    show_android = platform in ['all', 'android']
    alt_channel = '' if channel == 'release' else channel
    locale = locale or get_locale(ctx['request'])
    funnelcake_id = ctx.get('funnelcake_id', False)
    # One DOM id per platform/channel keeps multiple buttons on a page unique.
    dom_id = dom_id or 'download-button-%s-%s' % (
        'desktop' if platform == 'all' else platform, channel)

    # Fall back to en-US when no builds exist for the requested locale.
    l_version = firefox_desktop.latest_builds(locale, channel)
    if l_version:
        version, platforms = l_version
    else:
        locale = 'en-US'
        version, platforms = firefox_desktop.latest_builds('en-US', channel)

    # Gather data about the build for each platform
    builds = []
    if show_desktop:
        for plat_os, plat_os_pretty in firefox_desktop.platform_labels.iteritems():
            # Windows 64-bit builds are currently available only on the Aurora
            # and Beta channel
            if plat_os == 'win64' and channel not in ['alpha', 'beta']:
                continue

            # Fallback to en-US if this plat_os/version isn't available
            # for the current locale
            _locale = locale if plat_os_pretty in platforms else 'en-US'

            # And generate all the info
            download_link = firefox_desktop.get_download_url(
                channel, version, plat_os, _locale,
                force_direct=force_direct,
                force_full_installer=force_full_installer,
                force_funnelcake=force_funnelcake,
                funnelcake_id=funnelcake_id,
            )

            # If download_link_direct is False the data-direct-link attr
            # will not be output, and the JS won't attempt the IE popup.
            if force_direct:
                # no need to run get_download_url again with the same args
                download_link_direct = False
            else:
                download_link_direct = firefox_desktop.get_download_url(
                    channel, version, plat_os, _locale,
                    force_direct=True,
                    force_full_installer=force_full_installer,
                    force_funnelcake=force_funnelcake,
                    funnelcake_id=funnelcake_id,
                )
                if download_link_direct == download_link:
                    download_link_direct = False

            builds.append({'os': plat_os,
                           'os_pretty': plat_os_pretty,
                           'download_link': download_link,
                           'download_link_direct': download_link_direct})
    if show_android:
        builds = android_builds(channel, builds)

    # Get the native name for current locale
    langs = firefox_desktop.languages
    locale_name = langs[locale]['native'] if locale in langs else locale

    data = {
        'locale_name': locale_name,
        'version': version,
        'product': 'firefox-android' if platform == 'android' else 'firefox',
        'builds': builds,
        'id': dom_id,
        'small': small,
        'simple': simple,
        'channel': alt_channel,
        'show_android': show_android,
        'show_desktop': show_desktop,
        'icon': icon,
        'check_old_fx': check_old_fx and simple,
    }

    html = jingo.render_to_string(ctx['request'],
                                  'firefox/includes/download-button.html',
                                  data)
    return jinja2.Markup(html)
@jingo.register.function
def firefox_url(platform, page, channel=None):
    """
    Build a product-related URL like /firefox/all/ or /mobile/beta/notes/.

    Examples
    ========

    In Template
    -----------

        {{ firefox_url('desktop', 'all', 'organizations') }}
        {{ firefox_url('desktop', 'sysreq', channel) }}
        {{ firefox_url('android', 'notes') }}
    """
    # Translate channel names onto the aliases the URL patterns in urls.py
    # expect; the input values are mutually exclusive, so an elif chain
    # mirrors the original sequential rewrites.
    if channel == 'release':
        channel = None
    elif channel == 'alpha':
        if platform == 'desktop':
            channel = 'developer'
        elif platform == 'android':
            channel = 'aurora'
    elif channel == 'esr':
        channel = 'organizations'

    url_kwargs = {}
    if channel:
        url_kwargs['channel'] = channel
    # Non-desktop release notes live under a platform-specific URL.
    if page == 'notes' and platform != 'desktop':
        url_kwargs['platform'] = platform
    return reverse('firefox.%s' % page, kwargs=url_kwargs)
@jingo.register.function
def firefox_os_feed_links(locale, force_cache_refresh=False):
    """Return up to 10 (link, title) pairs for the Firefox OS feed in *locale*.

    Results are cached per locale. When the exact locale is not configured,
    retries with the bare language code (e.g. 'pt-BR' -> 'pt'); implicitly
    returns None when neither is supported.
    """
    if locale in settings.FIREFOX_OS_FEED_LOCALES:
        cache_key = 'firefox-os-feed-links-' + locale
        if not force_cache_refresh:
            links = cache.get(cache_key)
            if links:
                return links
        # Newest 10 entries for this locale, newest first.
        links = list(
            FirefoxOSFeedLink.objects.filter(locale=locale).order_by(
                '-id').values_list('link', 'title')[:10])
        cache.set(cache_key, links)
        return links
    elif '-' in locale:
        return firefox_os_feed_links(locale.split('-')[0])
@jingo.register.function
def firefox_os_blog_link(locale):
    """Return the Firefox OS press-blog URL for *locale*, falling back to the
    bare language code (e.g. 'es-ES' -> 'es'), or None if unconfigured."""
    links = settings.FXOS_PRESS_BLOG_LINKS
    if locale in links:
        return links[locale]
    lang = locale.split('-')[0]
    if lang != locale:
        return firefox_os_blog_link(lang)
    return None
| SujaySKumar/bedrock | bedrock/firefox/helpers.py | Python | mpl-2.0 | 8,166 |
"""
HTML Widget classes
"""
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback (builtin set arrived in 2.4)
import copy
from itertools import chain
from django.conf import settings
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import escape, conditional_escape
from django.utils.translation import ugettext
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.safestring import mark_safe
from django.utils import datetime_safe
from datetime import time
from util import flatatt
from urlparse import urljoin
# Names exported via ``from ... import *``.
__all__ = (
    'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput',
    'HiddenInput', 'MultipleHiddenInput',
    'FileInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
    'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
    'CheckboxSelectMultiple', 'MultiWidget',
    'SplitDateTimeWidget',
)
# Media kinds a widget may declare; each has matching add_*/render_* methods
# on Media and an optional attribute on a widget's inner ``class Media``.
MEDIA_TYPES = ('css','js')
class Media(StrAndUnicode):
    """
    Collects the CSS and JavaScript assets declared for a widget or form.

    Built either from an inner ``class Media`` definition (``media=``) or
    directly from ``css=``/``js=`` keyword arguments. Rendering produces the
    <link>/<script> tags; instances can be merged with ``+``.
    """
    def __init__(self, media=None, **kwargs):
        if media:
            media_attrs = media.__dict__
        else:
            media_attrs = kwargs

        # _css maps medium name -> list of paths; _js is an ordered path list.
        self._css = {}
        self._js = []

        for name in MEDIA_TYPES:
            getattr(self, 'add_' + name)(media_attrs.get(name, None))

        # Any leftover attributes must be invalid.
        # if media_attrs != {}:
        #     raise TypeError, "'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys())

    def __unicode__(self):
        return self.render()

    def render(self):
        # CSS tags first, then JS, one tag per line (MEDIA_TYPES order).
        return mark_safe(u'\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))

    def render_js(self):
        return [u'<script type="text/javascript" src="%s"></script>' % self.absolute_path(path) for path in self._js]

    def render_css(self):
        # To keep rendering order consistent, we can't just iterate over items().
        # We need to sort the keys, and iterate over the sorted list.
        media = self._css.keys()
        media.sort()
        return chain(*[
            [u'<link href="%s" type="text/css" media="%s" rel="stylesheet" />' % (self.absolute_path(path), medium)
                for path in self._css[medium]]
                for medium in media])

    def absolute_path(self, path):
        # Absolute URLs and root-relative paths pass through untouched;
        # everything else is resolved against MEDIA_URL.
        if path.startswith(u'http://') or path.startswith(u'https://') or path.startswith(u'/'):
            return path
        return urljoin(settings.MEDIA_URL,path)

    def __getitem__(self, name):
        "Returns a Media object that only contains media of the given type"
        if name in MEDIA_TYPES:
            return Media(**{name: getattr(self, '_' + name)})
        raise KeyError('Unknown media type "%s"' % name)

    def add_js(self, data):
        # Preserve insertion order while skipping duplicates.
        if data:
            self._js.extend([path for path in data if path not in self._js])

    def add_css(self, data):
        # data maps a medium name ('all', 'screen', ...) to a list of paths;
        # duplicates within a medium are skipped.
        if data:
            for medium, paths in data.items():
                self._css.setdefault(medium, []).extend([path for path in paths if path not in self._css[medium]])

    def __add__(self, other):
        # Merge both operands' assets into a fresh Media object.
        combined = Media()
        for name in MEDIA_TYPES:
            getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
            getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
        return combined
def media_property(cls):
    """Build a lazy ``media`` property for *cls* that merges the media of its
    superclass with the class's own ``class Media`` definition, honoring the
    optional ``extend`` attribute (True, or a list of media types)."""
    def _media(self):
        # Get the media property of the superclass, if it exists
        if hasattr(super(cls, self), 'media'):
            base = super(cls, self).media
        else:
            base = Media()

        # Get the media definition for this class
        definition = getattr(cls, 'Media', None)
        if definition:
            extend = getattr(definition, 'extend', True)
            if extend:
                if extend == True:
                    # extend=True inherits everything from the base.
                    m = base
                else:
                    # A list restricts inheritance to the named media types.
                    m = Media()
                    for medium in extend:
                        m = m + base[medium]
                return m + Media(definition)
            else:
                # extend=False: the class definition fully replaces the base.
                return Media(definition)
        else:
            return base
    return property(_media)
class MediaDefiningClass(type):
    "Metaclass for classes that can have media definitions"
    def __new__(cls, name, bases, attrs):
        new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
                                                           attrs)
        # Only synthesize the lazy ``media`` property when the class body did
        # not define one explicitly.
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        return new_class
class Widget(object):
    """Abstract base class for all form widgets; subclasses implement
    render() to produce the widget's HTML."""
    __metaclass__ = MediaDefiningClass
    is_hidden = False          # Determines whether this corresponds to an <input type="hidden">.
    needs_multipart_form = False # True when the form must be multipart-encoded (file uploads).

    def __init__(self, attrs=None):
        # Copy so callers mutating their dict later do not affect the widget.
        if attrs is not None:
            self.attrs = attrs.copy()
        else:
            self.attrs = {}

    def __deepcopy__(self, memo):
        # Shallow-copy the widget but give the copy its own attrs dict.
        obj = copy.copy(self)
        obj.attrs = self.attrs.copy()
        memo[id(self)] = obj
        return obj

    def render(self, name, value, attrs=None):
        """
        Returns this Widget rendered as HTML, as a Unicode string.

        The 'value' given is not guaranteed to be valid input, so subclass
        implementations should program defensively.
        """
        raise NotImplementedError

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        attrs = dict(self.attrs, **kwargs)
        if extra_attrs:
            attrs.update(extra_attrs)
        return attrs

    def value_from_datadict(self, data, files, name):
        """
        Given a dictionary of data and this widget's name, returns the value
        of this widget. Returns None if it's not provided.
        """
        return data.get(name, None)

    def _has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or inital value we get
        # is None, replace it w/ u''.
        if data is None:
            data_value = u''
        else:
            data_value = data
        if initial is None:
            initial_value = u''
        else:
            initial_value = initial
        if force_unicode(initial_value) != force_unicode(data_value):
            return True
        return False

    def id_for_label(self, id_):
        """
        Returns the HTML ID attribute of this Widget for use by a <label>,
        given the ID of the field. Returns None if no ID is available.

        This hook is necessary because some widgets have multiple HTML
        elements and, thus, multiple IDs. In that case, this method should
        return an ID value that corresponds to the first ID in the widget's
        tags.
        """
        return id_
    # Pre-decorator-syntax classmethod declaration (kept for old callers).
    id_for_label = classmethod(id_for_label)
class Input(Widget):
    """
    Base class for all <input> widgets (except type='checkbox' and
    type='radio', which are special).
    """
    input_type = None # Subclasses must define this.

    def render(self, name, value, attrs=None):
        if value is None: value = ''
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(value)
        # flatatt escapes attribute values; mark the result safe for templates.
        return mark_safe(u'<input%s />' % flatatt(final_attrs))
class TextInput(Input):
    """Renders an <input type="text"> element."""
    input_type = 'text'
class PasswordInput(Input):
    """Renders an <input type="password"> element."""
    input_type = 'password'

    def __init__(self, attrs=None, render_value=True):
        # NOTE(review): render_value=True re-echoes the submitted password in
        # the page's HTML on form redisplay; consider defaulting to False for
        # security-sensitive forms -- confirm with callers before changing.
        super(PasswordInput, self).__init__(attrs)
        self.render_value = render_value

    def render(self, name, value, attrs=None):
        if not self.render_value: value=None
        return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
    """Renders an <input type="hidden"> element."""
    input_type = 'hidden'
    is_hidden = True
class MultipleHiddenInput(HiddenInput):
    """
    A widget that handles <input type="hidden"> for fields that have a list
    of values.
    """
    def __init__(self, attrs=None, choices=()):
        super(MultipleHiddenInput, self).__init__(attrs)
        # choices can be any iterable
        self.choices = choices

    def render(self, name, value, attrs=None, choices=()):
        if value is None: value = []
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        # One hidden input per value, all sharing the same name attribute.
        return mark_safe(u'\n'.join([(u'<input%s />' %
            flatatt(dict(value=force_unicode(v), **final_attrs)))
            for v in value]))

    def value_from_datadict(self, data, files, name):
        # getlist() collects every submitted value for the shared name.
        if isinstance(data, (MultiValueDict, MergeDict)):
            return data.getlist(name)
        return data.get(name, None)
class FileInput(Input):
    """Renders an <input type="file"> element; requires a multipart form."""
    input_type = 'file'
    needs_multipart_form = True

    def render(self, name, value, attrs=None):
        # Never render a value: browsers ignore it and it could leak paths.
        return super(FileInput, self).render(name, None, attrs=attrs)

    def value_from_datadict(self, data, files, name):
        "File widgets take data from FILES, not POST"
        return files.get(name, None)

    def _has_changed(self, initial, data):
        # Any newly uploaded file counts as a change.
        if data is None:
            return False
        return True
class Textarea(Widget):
    """Renders a <textarea> element with its value as escaped content."""
    def __init__(self, attrs=None):
        # The 'rows' and 'cols' attributes are required for HTML correctness.
        self.attrs = {'cols': '40', 'rows': '10'}
        if attrs:
            self.attrs.update(attrs)

    def render(self, name, value, attrs=None):
        if value is None: value = ''
        value = force_unicode(value)
        final_attrs = self.build_attrs(attrs, name=name)
        return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
                conditional_escape(force_unicode(value))))
class DateTimeInput(Input):
    """Text input that formats datetime values via strftime on render."""
    input_type = 'text'
    format = '%Y-%m-%d %H:%M:%S'     # '2006-10-25 14:30:59'

    def __init__(self, attrs=None, format=None):
        super(DateTimeInput, self).__init__(attrs)
        if format:
            self.format = format

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        elif hasattr(value, 'strftime'):
            # datetime_safe copes with dates strftime() cannot (pre-1900).
            value = datetime_safe.new_datetime(value)
            value = value.strftime(self.format)
        return super(DateTimeInput, self).render(name, value, attrs)
class TimeInput(Input):
    input_type = 'text'

    def render(self, name, value, attrs=None):
        # Drop microseconds so the value round-trips cleanly through a
        # plain text field.
        display = '' if value is None else value
        if isinstance(display, time):
            display = display.replace(microsecond=0)
        return super(TimeInput, self).render(name, display, attrs)
class CheckboxInput(Widget):
    def __init__(self, attrs=None, check_test=bool):
        super(CheckboxInput, self).__init__(attrs)
        # check_test is a callable that takes a value and returns True
        # if the checkbox should be checked for that value.
        self.check_test = check_test

    def render(self, name, value, attrs=None):
        final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
        try:
            result = self.check_test(value)
        except Exception:
            # A broken check_test must not break rendering, but the previous
            # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            result = False
        if result:
            final_attrs['checked'] = 'checked'
        if value not in ('', True, False, None):
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(value)
        return mark_safe(u'<input%s />' % flatatt(final_attrs))

    def value_from_datadict(self, data, files, name):
        if name not in data:
            # A missing value means False because HTML form submission does not
            # send results for unselected checkboxes.
            return False
        return super(CheckboxInput, self).value_from_datadict(data, files, name)

    def _has_changed(self, initial, data):
        # Sometimes data or initial could be None or u'' which should be the
        # same thing as False.
        return bool(initial) != bool(data)
class Select(Widget):
    """Renders a single-selection <select> dropdown."""
    def __init__(self, attrs=None, choices=()):
        super(Select, self).__init__(attrs)
        # choices can be any iterable, but we may need to render this widget
        # multiple times. Thus, collapse it into a list so it can be consumed
        # more than once.
        self.choices = list(choices)
    def render(self, name, value, attrs=None, choices=()):
        # ``choices`` passed here are rendered after ``self.choices``.
        if value is None: value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        output = [u'<select%s>' % flatatt(final_attrs)]
        options = self.render_options(choices, [value])
        if options:
            output.append(options)
        output.append('</select>')
        return mark_safe(u'\n'.join(output))
    def render_options(self, choices, selected_choices):
        """Return the <option>/<optgroup> HTML for all choices."""
        def render_option(option_value, option_label):
            option_value = force_unicode(option_value)
            selected_html = (option_value in selected_choices) and u' selected="selected"' or ''
            return u'<option value="%s"%s>%s</option>' % (
                escape(option_value), selected_html,
                conditional_escape(force_unicode(option_label)))
        # Normalize to strings.
        selected_choices = set([force_unicode(v) for v in selected_choices])
        output = []
        for option_value, option_label in chain(self.choices, choices):
            # A (group_label, [(value, label), ...]) pair renders as an
            # <optgroup> of options.
            if isinstance(option_label, (list, tuple)):
                output.append(u'<optgroup label="%s">' % escape(force_unicode(option_value)))
                for option in option_label:
                    output.append(render_option(*option))
                output.append(u'</optgroup>')
            else:
                output.append(render_option(option_value, option_label))
        return u'\n'.join(output)
class NullBooleanSelect(Select):
    """
    A Select Widget intended to be used with NullBooleanField.
    """
    def __init__(self, attrs=None):
        # Internal choice values: '1' = Unknown, '2' = Yes, '3' = No.
        choices = ((u'1', ugettext('Unknown')),
                   (u'2', ugettext('Yes')),
                   (u'3', ugettext('No')))
        super(NullBooleanSelect, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        # Normalize python booleans / posted strings to the internal
        # choice values; anything unrecognized renders as 'Unknown'.
        value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}.get(value, u'1')
        return super(NullBooleanSelect, self).render(name, value, attrs, choices)

    def value_from_datadict(self, data, files, name):
        posted = data.get(name, None)
        return {u'2': True, u'3': False, True: True, False: False}.get(posted, None)

    def _has_changed(self, initial, data):
        # Sometimes data or initial could be None or u'' which should be the
        # same thing as False.
        return bool(initial) != bool(data)
class SelectMultiple(Select):
    def render(self, name, value, attrs=None, choices=()):
        """Render a multi-select <select> with the given values selected."""
        selected = value if value is not None else []
        final_attrs = self.build_attrs(attrs, name=name)
        parts = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)]
        rendered_options = self.render_options(choices, selected)
        if rendered_options:
            parts.append(rendered_options)
        parts.append('</select>')
        return mark_safe(u'\n'.join(parts))

    def value_from_datadict(self, data, files, name):
        if isinstance(data, (MultiValueDict, MergeDict)):
            return data.getlist(name)
        return data.get(name, None)

    def _has_changed(self, initial, data):
        # Compare element-wise after unicode normalization; a length
        # mismatch is a change by definition.
        initial = [] if initial is None else initial
        data = [] if data is None else data
        if len(initial) != len(data):
            return True
        return any(force_unicode(a) != force_unicode(b)
                   for a, b in zip(initial, data))
class RadioInput(StrAndUnicode):
    """
    An object used by RadioFieldRenderer that represents a single
    <input type='radio'>.
    """
    def __init__(self, name, value, attrs, choice, index):
        self.name, self.value = name, value
        self.attrs = attrs
        self.choice_value = force_unicode(choice[0])
        self.choice_label = force_unicode(choice[1])
        self.index = index

    def __unicode__(self):
        # The <label for="..."> must match the id that tag() emits, i.e.
        # the base id with the "_<index>" suffix.
        if 'id' in self.attrs:
            label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
        else:
            label_for = ''
        choice_label = conditional_escape(force_unicode(self.choice_label))
        return mark_safe(u'<label%s>%s %s</label>' % (label_for, self.tag(), choice_label))

    def is_checked(self):
        return self.value == self.choice_value

    def tag(self):
        """Return the <input type="radio"> HTML for this choice."""
        # Bug fix: apply the index suffix to a *copy* of the attrs.
        # Previously self.attrs['id'] was mutated in place, so rendering
        # the same RadioInput twice produced ids like "id_0_0".
        final_attrs = dict(self.attrs, type='radio', name=self.name,
                           value=self.choice_value)
        if 'id' in self.attrs:
            final_attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
        if self.is_checked():
            final_attrs['checked'] = 'checked'
        return mark_safe(u'<input%s />' % flatatt(final_attrs))
class RadioFieldRenderer(StrAndUnicode):
    """
    An object used by RadioSelect to enable customization of radio widgets.
    """
    def __init__(self, name, value, attrs, choices):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choices = choices

    def __iter__(self):
        # Each RadioInput gets its own copy of attrs so widgets can't
        # interfere with one another.
        for index, choice in enumerate(self.choices):
            yield RadioInput(self.name, self.value, self.attrs.copy(),
                             choice, index)

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propagate
        return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx)

    def __unicode__(self):
        return self.render()

    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        items = [u'<li>%s</li>' % force_unicode(widget) for widget in self]
        return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join(items))
class RadioSelect(Select):
    renderer = RadioFieldRenderer

    def __init__(self, *args, **kwargs):
        # Allow the renderer class to be overridden per-instance.
        custom_renderer = kwargs.pop('renderer', None)
        if custom_renderer:
            self.renderer = custom_renderer
        super(RadioSelect, self).__init__(*args, **kwargs)

    def get_renderer(self, name, value, attrs=None, choices=()):
        """Returns an instance of the renderer."""
        str_value = force_unicode('' if value is None else value)
        final_attrs = self.build_attrs(attrs)
        all_choices = list(chain(self.choices, choices))
        return self.renderer(name, str_value, final_attrs, all_choices)

    def render(self, name, value, attrs=None, choices=()):
        return self.get_renderer(name, value, attrs, choices).render()

    @classmethod
    def id_for_label(cls, id_):
        # RadioSelect is represented by multiple <input type="radio"> fields,
        # each of which has a distinct ID. The IDs are made distinct by a "_X"
        # suffix, where X is the zero-based index of the radio field. Thus,
        # the label for a RadioSelect should reference the first one ('_0').
        if id_:
            id_ += '_0'
        return id_
class CheckboxSelectMultiple(SelectMultiple):
    """Renders multiple choices as a <ul> of labelled checkboxes."""
    def render(self, name, value, attrs=None, choices=()):
        if value is None: value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        output = [u'<ul>']
        # Normalize to strings
        str_values = set([force_unicode(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = u' for="%s"' % final_attrs['id']
            else:
                label_for = ''
            # check_test closes over str_values (not the loop variable), so
            # sharing the one set across iterations is safe here.
            cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
            option_value = force_unicode(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = conditional_escape(force_unicode(option_label))
            output.append(u'<li><label%s>%s %s</label></li>' % (label_for, rendered_cb, option_label))
        output.append(u'</ul>')
        return mark_safe(u'\n'.join(output))
    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_
    id_for_label = classmethod(id_for_label)
class MultiWidget(Widget):
    """
    A widget that is composed of multiple widgets.
    Its render() method is different than other widgets', because it has to
    figure out how to split a single value for display in multiple widgets.
    The ``value`` argument can be one of two things:
    * A list.
    * A normal value (e.g., a string) that has been "compressed" from
      a list of values.
    In the second case -- i.e., if the value is NOT a list -- render() will
    first "decompress" the value into a list before rendering it. It does so by
    calling the decompress() method, which MultiWidget subclasses must
    implement. This method takes a single "compressed" value and returns a
    list.
    When render() does its HTML rendering, each value in the list is rendered
    with the corresponding widget -- the first value is rendered in the first
    widget, the second value is rendered in the second widget, etc.
    Subclasses may implement format_output(), which takes the list of rendered
    widgets and returns a string of HTML that formats them any way you'd like.
    You'll probably want to use this class with MultiValueField.
    """
    def __init__(self, widgets, attrs=None):
        # Accept widget classes as well as instances; classes are
        # instantiated with no arguments.
        self.widgets = [isinstance(w, type) and w() or w for w in widgets]
        super(MultiWidget, self).__init__(attrs)
    def render(self, name, value, attrs=None):
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
        if not isinstance(value, list):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        for i, widget in enumerate(self.widgets):
            # Missing trailing values render as empty sub-widgets.
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            # Each sub-widget gets an "_<i>"-suffixed id and name.
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
        return mark_safe(self.format_output(output))
    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_
    id_for_label = classmethod(id_for_label)
    def value_from_datadict(self, data, files, name):
        # Collect one value per sub-widget using the suffixed names.
        return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
    def _has_changed(self, initial, data):
        if initial is None:
            initial = [u'' for x in range(0, len(data))]
        else:
            if not isinstance(initial, list):
                initial = self.decompress(initial)
        # NOTE: the loop variables deliberately shadow ``initial``/``data``;
        # the lists are not needed again after this point.
        for widget, initial, data in zip(self.widgets, initial, data):
            if widget._has_changed(initial, data):
                return True
        return False
    def format_output(self, rendered_widgets):
        """
        Given a list of rendered widgets (as strings), returns a Unicode string
        representing the HTML for the whole lot.
        This hook allows you to format the HTML design of the widgets, if
        needed.
        """
        return u''.join(rendered_widgets)
    def decompress(self, value):
        """
        Returns a list of decompressed values for the given compressed value.
        The given value can be assumed to be valid, but not necessarily
        non-empty.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def _get_media(self):
        "Media for a multiwidget is the combination of all media of the subwidgets"
        media = Media()
        for w in self.widgets:
            media = media + w.media
        return media
    media = property(_get_media)
class SplitDateTimeWidget(MultiWidget):
    """
    A Widget that splits datetime input into two <input type="text"> boxes.
    """
    def __init__(self, attrs=None):
        super(SplitDateTimeWidget, self).__init__(
            (TextInput(attrs=attrs), TextInput(attrs=attrs)), attrs)

    def decompress(self, value):
        # Split a datetime into [date, time]; microseconds are dropped so
        # the time round-trips through a text field.
        if not value:
            return [None, None]
        return [value.date(), value.time().replace(microsecond=0)]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
    """
    A Widget that splits datetime input into two <input type="hidden"> inputs.
    """
    def __init__(self, attrs=None):
        widgets = (HiddenInput(attrs=attrs), HiddenInput(attrs=attrs))
        # Deliberately bypass SplitDateTimeWidget.__init__ (which hard-codes
        # TextInput children and takes only ``attrs``) and initialize
        # MultiWidget directly with hidden inputs. The previous
        # ``super(SplitDateTimeWidget, self)`` call did the same thing but
        # read like a copy-paste bug.
        MultiWidget.__init__(self, widgets, attrs)
| AloneRoad/Inforlearn | vendor/django/forms/widgets.py | Python | apache-2.0 | 25,335 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Model zoo for pre-trained models."""
__all__ = ['get_model_file', 'purge']
import os
import zipfile
import logging
import tempfile
import uuid
import shutil
from ..utils import download, check_sha1, replace_file
from ... import base
# Mapping of model name -> SHA1 checksum of its parameter file. The first
# 8 hex digits of the checksum double as the version tag embedded in the
# downloaded file name (see short_hash()).
_model_sha1 = {name: checksum for checksum, name in [
    ('44335d1f0046b328243b32a26a4fbd62d9057b45', 'alexnet'),
    ('f27dbf2dbd5ce9a80b102d89c7483342cd33cb31', 'densenet121'),
    ('b6c8a95717e3e761bd88d145f4d0a214aaa515dc', 'densenet161'),
    ('2603f878403c6aa5a71a124c4a3307143d6820e9', 'densenet169'),
    ('1cdbc116bc3a1b65832b18cf53e1cb8e7da017eb', 'densenet201'),
    ('ed47ec45a937b656fcc94dabde85495bbef5ba1f', 'inceptionv3'),
    ('9f83e440996887baf91a6aff1cccc1c903a64274', 'mobilenet0.25'),
    ('8e9d539cc66aa5efa71c4b6af983b936ab8701c3', 'mobilenet0.5'),
    ('529b2c7f4934e6cb851155b22c96c9ab0a7c4dc2', 'mobilenet0.75'),
    ('6b8c5106c730e8750bcd82ceb75220a3351157cd', 'mobilenet1.0'),
    ('36da4ff1867abccd32b29592d79fc753bca5a215', 'mobilenetv2_1.0'),
    ('e2be7b72a79fe4a750d1dd415afedf01c3ea818d', 'mobilenetv2_0.75'),
    ('aabd26cd335379fcb72ae6c8fac45a70eab11785', 'mobilenetv2_0.5'),
    ('ae8f9392789b04822cbb1d98c27283fc5f8aa0a7', 'mobilenetv2_0.25'),
    ('a0666292f0a30ff61f857b0b66efc0228eb6a54b', 'resnet18_v1'),
    ('48216ba99a8b1005d75c0f3a0c422301a0473233', 'resnet34_v1'),
    ('0aee57f96768c0a2d5b23a6ec91eb08dfb0a45ce', 'resnet50_v1'),
    ('d988c13d6159779e907140a638c56f229634cb02', 'resnet101_v1'),
    ('671c637a14387ab9e2654eafd0d493d86b1c8579', 'resnet152_v1'),
    ('a81db45fd7b7a2d12ab97cd88ef0a5ac48b8f657', 'resnet18_v2'),
    ('9d6b80bbc35169de6b6edecffdd6047c56fdd322', 'resnet34_v2'),
    ('ecdde35339c1aadbec4f547857078e734a76fb49', 'resnet50_v2'),
    ('18e93e4f48947e002547f50eabbcc9c83e516aa6', 'resnet101_v2'),
    ('f2695542de38cf7e71ed58f02893d82bb409415e', 'resnet152_v2'),
    ('264ba4970a0cc87a4f15c96e25246a1307caf523', 'squeezenet1.0'),
    ('33ba0f93753c83d86e1eb397f38a667eaf2e9376', 'squeezenet1.1'),
    ('dd221b160977f36a53f464cb54648d227c707a05', 'vgg11'),
    ('ee79a8098a91fbe05b7a973fed2017a6117723a8', 'vgg11_bn'),
    ('6bc5de58a05a5e2e7f493e2d75a580d83efde38c', 'vgg13'),
    ('7d97a06c3c7a1aecc88b6e7385c2b373a249e95e', 'vgg13_bn'),
    ('e660d4569ccb679ec68f1fd3cce07a387252a90a', 'vgg16'),
    ('7f01cf050d357127a73826045c245041b0df7363', 'vgg16_bn'),
    ('ad2f660d101905472b83590b59708b71ea22b2e5', 'vgg19'),
    ('f360b758e856f1074a85abd5fd873ed1d98297c3', 'vgg19_bn')]}
# Default download mirror; overridable via the MXNET_GLUON_REPO env var
# (see get_model_file()).
apache_repo_url = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
_url_format = '{repo_url}gluon/models/{file_name}.zip'
def short_hash(name):
    """Return the first 8 hex digits of the named model's checksum."""
    try:
        return _model_sha1[name][:8]
    except KeyError:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
def get_model_file(name, root=os.path.join(base.data_dir(), 'models')):
    r"""Return location for the pretrained on local file system.
    This function will download from online model zoo when model cannot be found or has mismatch.
    The root directory will be created if it doesn't exist.
    Parameters
    ----------
    name : str
        Name of the model.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    file_name = '{name}-{short_hash}'.format(name=name,
                                             short_hash=short_hash(name))
    root = os.path.expanduser(root)
    file_path = os.path.join(root, file_name+'.params')
    sha1_hash = _model_sha1[name]
    # Fast path: a cached file with a matching checksum is used as-is.
    if os.path.exists(file_path):
        if check_sha1(file_path, sha1_hash):
            return file_path
        else:
            logging.warning('Mismatch in the content of model file detected. Downloading again.')
    else:
        logging.info('Model file not found. Downloading to %s.', file_path)
    os.makedirs(root, exist_ok=True)
    repo_url = os.environ.get('MXNET_GLUON_REPO', apache_repo_url)
    if repo_url[-1] != '/':
        repo_url = repo_url + '/'
    # Download to a uniquely-named temp zip so concurrent callers don't
    # clobber each other's partial downloads.
    random_uuid = str(uuid.uuid4())
    temp_zip_file_path = os.path.join(root, file_name+'.zip'+random_uuid)
    download(_url_format.format(repo_url=repo_url, file_name=file_name),
             path=temp_zip_file_path, overwrite=True)
    with zipfile.ZipFile(temp_zip_file_path) as zf:
        # Extract into a temp dir, then atomically move the params file
        # into place before cleaning up.
        temp_dir = tempfile.mkdtemp(dir=root)
        zf.extractall(temp_dir)
        temp_file_path = os.path.join(temp_dir, file_name+'.params')
        replace_file(temp_file_path, file_path)
        shutil.rmtree(temp_dir)
    os.remove(temp_zip_file_path)
    # Final integrity check of whatever now sits at file_path.
    if check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')
def purge(root=os.path.join(base.data_dir(), 'models')):
    r"""Purge all pretrained model files in local file store.
    Parameters
    ----------
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
    """
    root = os.path.expanduser(root)
    # Robustness fix: a missing cache directory means there is nothing to
    # purge; previously os.listdir() raised an OSError in that case.
    if not os.path.isdir(root):
        return
    for f in os.listdir(root):
        if f.endswith(".params"):
            os.remove(os.path.join(root, f))
| szha/mxnet | python/mxnet/gluon/model_zoo/model_store.py | Python | apache-2.0 | 6,036 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt, glob, os, sys
def main(argv):
f1 = ""
f2 = ""
# Get the base folder
try:
opts, args = getopt.getopt(argv, "h", ["f1=", "f2="])
except getopt.GetoptError:
print 'The file options for build_saxon_collection_xml.py were not correctly specified.'
print 'To see a full list of options try:'
print ' $ python build_saxon_collection_xml.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Options:'
print ' -f The base folder to create collection XML file.'
sys.exit()
elif opt in ('--f1'):
# check if file exists.
if os.path.exists(arg):
f1 = arg
else:
print 'Error: Argument must be a file name for --f1.'
sys.exit()
elif opt in ('--f2'):
# check if file exists.
if os.path.exists(arg):
f2 = arg
else:
print 'Error: Argument must be a file name for --f2.'
sys.exit()
# Required fields to run the script.
if f1 == "" or not os.path.exists(f1):
print 'Error: The file path option must be supplied: --f1.'
sys.exit()
if f2 == "" or not os.path.exists(f2):
print 'Error: The file path option must be supplied: --f2.'
sys.exit()
missing_in_f1 = []
missing_in_f2 = []
found_in_both = []
with open(f1) as f:
content_f1 = f.readlines()
set_f1 = set(content_f1)
with open(f2) as f:
content_f2 = f.readlines()
set_f2 = set(content_f2)
missing_in_f1 = set_f2.difference(set_f1)
missing_in_f2 = set_f1.difference(set_f2)
found_in_both = set_f1.intersection(set_f2)
print ""
print "Missing files in " + f1
for f1_name in missing_in_f1:
print " + " + f1_name.strip()
print ""
print "Missing files in " + f2
for f2_name in missing_in_f2:
print " + " + f2_name.strip()
offset = 40
print ""
print "XML Summary"
print (" - Found in both:").ljust(offset) + str(len(found_in_both))
print (" - " + f1 + " diff set vs list:").ljust(offset) + str(len(content_f1) - len(set_f1))
print (" - " + f2 + " diff set vs list:").ljust(offset) + str(len(content_f2) - len(set_f2))
print (" - " + f1 + " missing:").ljust(offset) + str(len(missing_in_f1))
print (" - " + f2 + " missing:").ljust(offset) + str(len(missing_in_f2))
if __name__ == "__main__":
main(sys.argv[1:])
| innovimax/vxquery | vxquery-benchmark/src/main/resources/util/diff_xml_files.py | Python | apache-2.0 | 3,401 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.mnist.load_data')
def load_data(path='mnist.npz'):
  """Loads the MNIST dataset.
  Arguments:
      path: path where to cache the dataset locally
          (relative to ~/.keras/datasets).
  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  License:
      Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,
      which is a derivative work from original NIST datasets.
      MNIST dataset is made available under the terms of the
      [Creative Commons Attribution-Share Alike 3.0 license.](
      https://creativecommons.org/licenses/by-sa/3.0/)
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  # get_file caches the download and verifies its checksum.
  path = get_file(
      path,
      origin=origin_folder + 'mnist.npz',
      file_hash='8a61469f7ea1b51cbae51d4f78837e45')
  with np.load(path) as archive:
    train = (archive['x_train'], archive['y_train'])
    test = (archive['x_test'], archive['y_test'])
  return train, test
| ghchinoy/tensorflow | tensorflow/python/keras/datasets/mnist.py | Python | apache-2.0 | 1,993 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import time
from os import listdir
from os.path import join
from Queue import Queue, Empty
from select import select
from threading import Thread, Lock
from odoo import http
import odoo.addons.hw_proxy.controllers.main as hw_proxy
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('Odoo module hw_scanner depends on the evdev python module')
evdev = None
class ScannerDevice():
    """Per-device state for one grabbed evdev input device."""
    def __init__(self, path):
        self.evdev = evdev.InputDevice(path)
        # Grab exclusive access so scanned keystrokes don't also reach
        # the console / other readers.
        self.evdev.grab()
        # Characters decoded so far for the barcode currently being scanned.
        self.barcode = []
        # True while a shift key is held down on this device.
        self.shift = False
class Scanner(Thread):
    """Background thread that reads barcode scanners via evdev.

    Keydown events from matching /dev/input devices are decoded through a
    US-keyboard keymap and accumulated per device; a full barcode is pushed
    onto a timestamped queue when ENTER is received.
    """
    def __init__(self, ):
        Thread.__init__(self)
        self.lock = Lock()
        self.status = {'status':'connecting', 'messages':[]}
        self.input_dir = '/dev/input/by-id/'
        self.open_devices = []
        self.barcodes = Queue()
        # evdev keycode -> (char, shifted char) for a US keyboard layout.
        self.keymap = {
            2: ("1","!"),
            3: ("2","@"),
            4: ("3","#"),
            5: ("4","$"),
            6: ("5","%"),
            7: ("6","^"),
            8: ("7","&"),
            9: ("8","*"),
            10:("9","("),
            11:("0",")"),
            12:("-","_"),
            13:("=","+"),
            # 14 BACKSPACE
            # 15 TAB
            16:("q","Q"),
            17:("w","W"),
            18:("e","E"),
            19:("r","R"),
            20:("t","T"),
            21:("y","Y"),
            22:("u","U"),
            23:("i","I"),
            24:("o","O"),
            25:("p","P"),
            26:("[","{"),
            27:("]","}"),
            # 28 ENTER
            # 29 LEFT_CTRL
            30:("a","A"),
            31:("s","S"),
            32:("d","D"),
            33:("f","F"),
            34:("g","G"),
            35:("h","H"),
            36:("j","J"),
            37:("k","K"),
            38:("l","L"),
            39:(";",":"),
            40:("'","\""),
            41:("`","~"),
            # 42 LEFT SHIFT
            43:("\\","|"),
            44:("z","Z"),
            45:("x","X"),
            46:("c","C"),
            47:("v","V"),
            48:("b","B"),
            49:("n","N"),
            50:("m","M"),
            51:(",","<"),
            52:(".",">"),
            53:("/","?"),
            # 54 RIGHT SHIFT
            57:(" "," "),
        }
    def lockedstart(self):
        # Start the thread at most once, even under concurrent callers.
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()
    def set_status(self, status, message = None):
        # Append to the message log on repeated status; reset it on change.
        if status == self.status['status']:
            if message != None and message != self.status['messages'][-1]:
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []
        if status == 'error' and message:
            _logger.error('Barcode Scanner Error: '+message)
        elif status == 'disconnected' and message:
            _logger.info('Disconnected Barcode Scanner: %s', message)
    def get_devices(self):
        """Open any newly-plugged scanner-looking devices; return all open ones."""
        try:
            if not evdev:
                return None
            # Only consider device nodes we have not already opened.
            new_devices = [device for device in listdir(self.input_dir)
                           if join(self.input_dir, device) not in [dev.evdev.fn for dev in self.open_devices]]
            # Heuristic: keyboard-like nodes that are not actual keyboards,
            # or anything naming itself a barcode reader / scanner.
            scanners = [device for device in new_devices
                        if (('kbd' in device) and ('keyboard' not in device.lower()))
                        or ('barcode' in device.lower()) or ('scanner' in device.lower())]
            for device in scanners:
                _logger.debug('opening device %s', join(self.input_dir,device))
                self.open_devices.append(ScannerDevice(join(self.input_dir,device)))
            if self.open_devices:
                self.set_status('connected','Connected to '+ str([dev.evdev.name for dev in self.open_devices]))
            else:
                self.set_status('disconnected','Barcode Scanner Not Found')
            return self.open_devices
        except Exception as e:
            self.set_status('error',str(e))
            return []
    def release_device(self, dev):
        self.open_devices.remove(dev)
    def get_barcode(self):
        """ Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcode scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode
        """
        self.lockedstart()
        while True:
            try:
                timestamp, barcode = self.barcodes.get(True, 5)
                if timestamp > time.time() - 5:
                    return barcode
            except Empty:
                return ''
    def get_status(self):
        self.lockedstart()
        return self.status
    def _get_open_device_by_fd(self, fd):
        # Map a select()-returned file descriptor back to its device.
        for dev in self.open_devices:
            if dev.evdev.fd == fd:
                return dev
    def run(self):
        """ This will start a loop that catches all keyboard events, parse barcode
            sequences and put them on a timestamped queue that can be consumed by
            the point of sale's requests for barcode events
        """
        self.barcodes = Queue()
        barcode  = []
        shift    = False
        devices  = None
        while True: # barcodes loop
            devices = self.get_devices()
            try:
                while True: # keycode loop
                    # Wait up to 5s for readable devices; the dict's keys
                    # (file descriptors) are what select() consumes.
                    r,w,x = select({dev.fd: dev for dev in [d.evdev for d in devices]},[],[],5)
                    if len(r) == 0: # timeout
                        break
                    for fd in r:
                        device = self._get_open_device_by_fd(fd)
                        # Unplugged device: drop it and rescan.
                        if not evdev.util.is_device(device.evdev.fn):
                            _logger.info('%s disconnected', str(device.evdev))
                            self.release_device(device)
                            break
                        events = device.evdev.read()
                        for event in events:
                            if event.type == evdev.ecodes.EV_KEY:
                                # _logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
                                if event.value == 1: # keydown events
                                    if event.code in self.keymap:
                                        if device.shift:
                                            device.barcode.append(self.keymap[event.code][1])
                                        else:
                                            device.barcode.append(self.keymap[event.code][0])
                                    elif event.code == 42 or event.code == 54: # SHIFT
                                        device.shift = True
                                    elif event.code == 28: # ENTER, end of barcode
                                        _logger.debug('pushing barcode %s from %s', ''.join(device.barcode), str(device.evdev))
                                        self.barcodes.put( (time.time(),''.join(device.barcode)) )
                                        device.barcode = []
                                elif event.value == 0: #keyup events
                                    if event.code == 42 or event.code == 54: # LEFT SHIFT
                                        device.shift = False
            except Exception as e:
                self.set_status('error',str(e))
# Module-level singleton: the scanner thread is only created when evdev is
# importable; hw_proxy exposes it under the 'scanner' driver name.
scanner_thread = None
if evdev:
    scanner_thread = Scanner()
    hw_proxy.drivers['scanner'] = scanner_thread
class ScannerDriver(hw_proxy.Proxy):
    @http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
    def scanner(self):
        # Blocks up to ~5 seconds waiting for a scan; '' on timeout,
        # None when no scanner support is available on this host.
        return scanner_thread.get_barcode() if scanner_thread else None
| chienlieu2017/it_management | odoo/addons/hw_scanner/controllers/main.py | Python | gpl-3.0 | 8,110 |
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
:func:`iris.fileformats.pp_rules._reshape_vector_args`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.fileformats.pp_rules import _reshape_vector_args
class TestEmpty(tests.IrisTest):
    """An empty argument list reshapes to an empty result."""
    def test(self):
        self.assertEqual(_reshape_vector_args([]), [])
class TestSingleArg(tests.IrisTest):
    """Behaviour of _reshape_vector_args with a single (array, dims) pair."""
    def _check(self, result, expected):
        # Compare element-wise; lengths must match first.
        self.assertEqual(len(result), len(expected))
        for result_arr, expected_arr in zip(result, expected):
            self.assertArrayEqual(result_arr, expected_arr)
    def test_nochange(self):
        # Dims already in ascending order: array passes through untouched.
        points = np.array([[1, 2, 3], [4, 5, 6]])
        result = _reshape_vector_args([(points, (0, 1))])
        expected = [points]
        self._check(result, expected)
    def test_bad_dimensions(self):
        # More dims than array dimensions is rejected.
        points = np.array([[1, 2, 3], [4, 5, 6]])
        with self.assertRaisesRegexp(ValueError, 'Length'):
            _reshape_vector_args([(points, (0, 1, 2))])
    def test_scalar(self):
        points = 5
        result = _reshape_vector_args([(points, ())])
        expected = [points]
        self._check(result, expected)
    def test_nonarray(self):
        # Plain nested lists are converted to ndarray.
        points = [[1, 2, 3], [4, 5, 6]]
        result = _reshape_vector_args([(points, (0, 1))])
        expected = [np.array(points)]
        self._check(result, expected)
    def test_transpose(self):
        # Dims given in descending order transpose the array.
        points = np.array([[1, 2, 3], [4, 5, 6]])
        result = _reshape_vector_args([(points, (1, 0))])
        expected = [points.T]
        self._check(result, expected)
    def test_extend(self):
        # Non-contiguous dims insert length-1 axes in between.
        points = np.array([[1, 2, 3, 4], [21, 22, 23, 24], [31, 32, 33, 34]])
        result = _reshape_vector_args([(points, (1, 3))])
        expected = [points.reshape(1, 3, 1, 4)]
        self._check(result, expected)
class TestMultipleArgs(tests.IrisTest):
    """Behaviour of _reshape_vector_args with several (array, dims) pairs."""
    def _check(self, result, expected):
        # Compare element-wise; lengths must match first.
        self.assertEqual(len(result), len(expected))
        for result_arr, expected_arr in zip(result, expected):
            self.assertArrayEqual(result_arr, expected_arr)
    def test_nochange(self):
        a1 = np.array([[1, 2, 3], [4, 5, 6]])
        a2 = np.array([[0, 2, 4], [7, 8, 9]])
        result = _reshape_vector_args([(a1, (0, 1)), (a2, (0, 1))])
        expected = [a1, a2]
        self._check(result, expected)
    def test_array_and_scalar(self):
        # A scalar argument is broadcast up to a 1x1 array.
        a1 = [[1, 2, 3], [3, 4, 5]]
        a2 = 5
        result = _reshape_vector_args([(a1, (0, 1)), (a2, ())])
        expected = [a1, np.array([[5]])]
        self._check(result, expected)
    def test_transpose(self):
        a1 = np.array([[1, 2, 3], [4, 5, 6]])
        a2 = np.array([[0, 2, 4], [7, 8, 9]])
        result = _reshape_vector_args([(a1, (0, 1)), (a2, (1, 0))])
        expected = [a1, a2.T]
        self._check(result, expected)
    def test_incompatible(self):
        # Does not enforce compatibility of results.
        a1 = np.array([1, 2])
        a2 = np.array([1, 2, 3])
        result = _reshape_vector_args([(a1, (0,)), (a2, (0,))])
        expected = [a1, a2]
        self._check(result, expected)
    def test_extend(self):
        # A 1-D arg mapped onto dim 1 gains a leading length-1 axis.
        a1 = np.array([[1, 2, 3], [4, 5, 6]])
        a2 = np.array([11, 12, 13])
        result = _reshape_vector_args([(a1, (0, 1)), (a2, (1,))])
        expected = [a1, a2.reshape(1, 3)]
        self._check(result, expected)
    def test_extend_transpose(self):
        a1 = np.array([[1, 2, 3], [4, 5, 6]])
        a2 = np.array([11, 12, 13])
        result = _reshape_vector_args([(a1, (1, 0)), (a2, (1,))])
        expected = [a1.T, a2.reshape(1, 3)]
        self._check(result, expected)
    def test_double_extend(self):
        a1 = np.array([[1, 2, 3], [4, 5, 6]])
        a2 = np.array(1)
        result = _reshape_vector_args([(a1, (0, 2)), (a2, ())])
        expected = [a1.reshape(2, 1, 3), a2.reshape(1, 1, 1)]
        self._check(result, expected)
if __name__ == "__main__":
    tests.main()
| mo-g/iris | lib/iris/tests/unit/fileformats/pp_rules/test__reshape_vector_args.py | Python | gpl-3.0 | 5,242 |
###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## Tavendo WebMQ Application Key and Secret for our Web app
APPKEY = 'foobar'
APPSECRET = 'secret'
## The "user database" of our Web app
USERDB = {'joe': 'secret', 'admin': 'hoho'}
import json, uuid, sys
from flask import Flask, url_for, Response, request, session, \
render_template, redirect, escape, flash
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
import hmac, hashlib, binascii
def authSignature(authChallenge, authSecret = None):
   """Return the base64-encoded HMAC-SHA256 of authChallenge, keyed with
   authSecret.  An absent secret is treated as the empty string."""
   key = authSecret if authSecret is not None else ""
   digest = hmac.new(key, authChallenge, hashlib.sha256).digest()
   return binascii.b2a_base64(digest).strip()
@app.route('/')
def index():
   ## Unauthenticated visitors are bounced to the login page.
   if 'username' not in session:
      return redirect(url_for('login'))
   ## sys.argv[1] is the WebMQ server address this demo was launched with.
   return render_template('index.html',
                          server = sys.argv[1],
                          topic = "http://example.com/simple")
@app.route('/login', methods=['GET', 'POST'])
def login():
   """Log a user in against the USERDB 'user database'.

   GET renders the login form; POST validates the submitted credentials
   and, on success, stores the username in the session.
   """
   error = None
   if request.method == 'POST':
      username = request.form['username']
      ## FIX: the original check used a chained comparison
      ##    USERDB[username] != request.form['password'] != 'secret'
      ## which is False ("credentials OK") whenever the submitted password
      ## is the literal string 'secret', so ANY existing user could log in
      ## with 'secret' regardless of their real password.  Compare only
      ## against the stored password.  The membership test also replaces
      ## the deprecated dict.has_key().
      if username not in USERDB or \
            USERDB[username] != request.form['password']:
         error = 'Invalid credentials'
      else:
         flash("You were successfully logged in as '%s'" % username)
         session['username'] = username
         return redirect(url_for('index'))
   return render_template('login.html', error = error)
@app.route('/logout')
def logout():
   ## Dropping the session key logs the user out; a missing key is a no-op.
   session.pop('username', None)
   return redirect(url_for('index'))
@app.route('/authsign', methods = ['POST'])
def authsign():
if 'username' in session:
try:
data = json.loads(request.data)
print "Challenge:", data
if data['authkey'] == APPKEY:
sig = authSignature(request.data, APPSECRET)
print "Signature:", sig
return sig
except Expection, e:
print e
return ""
if __name__ == "__main__":
app.run(host = "0.0.0.0", port = 8000, debug = True)
| CSF-JH/crossbarexamples | rest/needs_cleanup/python/example/auth/__init__.py | Python | apache-2.0 | 2,784 |
from contextlib import contextmanager
from mpconstants.countries import COUNTRY_DETAILS
from nose.tools import eq_, ok_
from tower import activate
import mkt.constants.regions as regions
from mkt.site.tests import TestCase
class TestRegions(TestCase):
    def test_no_missing_region(self):
        """Test that we haven't forgotten to add some regions to the lookup
        dictionary."""
        known = regions.REGION_LOOKUP.keys()
        published = {country['slug'] for country in COUNTRY_DETAILS.values()}
        eq_(list(published.difference(known)), [])

    def test_regions_dict(self):
        eq_(regions.REGIONS_DICT['restofworld'], regions.RESTOFWORLD)
        eq_(regions.REGIONS_DICT['us'], regions.USA)
        for region in regions.REGIONS_DICT.values():
            # Values must be region classes...
            ok_(issubclass(region, regions.REGION))
            # ...reachable by their own name from the regions module...
            eq_(getattr(regions, region.__name__), region)
            # ...with a lazy unicode name that survives utf-8 encoding.
            ok_(region.name.encode('utf-8'))
class TestRegionContentRatings(TestCase):
    """Tests for region -> content-rating-body mapping and for the
    locale-aware sorting of the region choice lists."""

    @contextmanager
    def tower_activate(self, region):
        # Temporarily switch the active translation locale, always
        # restoring en-US afterwards.
        try:
            activate(region)
            yield
        finally:
            activate('en-US')

    def test_region_to_ratings_body(self):
        # Spot-check the rating body assigned to a few known regions.
        region_to_body = regions.REGION_TO_RATINGS_BODY()
        eq_(region_to_body['br'], 'classind')
        eq_(region_to_body['es'], 'pegi')
        eq_(region_to_body['de'], 'usk')
        eq_(region_to_body['us'], 'esrb')

    def test_name_sorted_regions_eq_slug_sorted_regions(self):
        """Check data is the same, irrespective of ordering."""
        self.assertEqual(len(regions.REGIONS_CHOICES_NAME),
                         len(regions.REGIONS_CHOICES_SORTED_BY_NAME()))
        self.assertSetEqual(regions.REGIONS_CHOICES_NAME,
                            regions.REGIONS_CHOICES_SORTED_BY_NAME())

    def test_rest_of_world_last_regions_by_slug(self):
        # "Rest of world" must sort last in the slug-ordered list...
        eq_(regions.REGIONS_CHOICES_NAME[-1][1], regions.RESTOFWORLD.name)

    def test_rest_of_world_last_regions_by_name(self):
        # ...and also in the name-ordered list.
        eq_(regions.REGIONS_CHOICES_SORTED_BY_NAME()[-1][1],
            regions.RESTOFWORLD.name)

    def test_localized_sorting_of_region_choices_pl(self):
        # Under the Polish locale the localized names sort differently than
        # in English; check the expected relative ordering.
        with self.tower_activate('pl'):
            region_names_pl = [r[1] for r in
                               regions.REGIONS_CHOICES_SORTED_BY_NAME()]
            ok_(region_names_pl.index(regions.ESP.name) <
                region_names_pl.index(regions.GBR.name))
            ok_(region_names_pl.index(regions.GBR.name) >
                region_names_pl.index(regions.USA.name))

    def test_localized_sorting_of_region_choices_fr(self):
        # Same relative-ordering check under the French locale.
        with self.tower_activate('fr'):
            region_names_fr = [unicode(r[1]) for r in
                               regions.REGIONS_CHOICES_SORTED_BY_NAME()]
            ok_(region_names_fr.index(regions.ESP.name) <
                region_names_fr.index(regions.USA.name))
            ok_(region_names_fr.index(regions.USA.name) <
                region_names_fr.index(regions.GBR.name))
| jamesthechamp/zamboni | mkt/constants/tests/test_regions.py | Python | bsd-3-clause | 3,275 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_udld_interface
version_added: "2.2"
short_description: Manages UDLD interface configuration params.
description:
- Manages UDLD interface configuration params.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- Feature UDLD must be enabled on the device to use this module.
options:
mode:
description:
- Manages UDLD mode for an interface.
required: true
choices: ['enabled','disabled','aggressive']
interface:
description:
- FULL name of the interface, i.e. Ethernet1/1-
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure Ethernet1/1 is configured to be in aggressive mode
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove the aggressive config only if it's currently in aggressive mode and then disable udld (switch default)
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure Ethernet1/1 has aggressive mode enabled
- nxos_udld_interface:
interface: Ethernet1/1
mode: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mode": "enabled"}
existing:
description:
- k/v pairs of existing configuration
type: dict
sample: {"mode": "aggressive"}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"mode": "enabled"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface ethernet1/33",
"no udld aggressive ; no udld disable"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Return *val* as a list: lists/tuples are copied, None becomes an
    empty list, and any other value is wrapped as a single element."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion/lookup helpers used by
    the migration shim above."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of a config line and all of its children
        # into the accumulator list S (shared across recursive calls).
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the item whose text matches the last path element AND whose
        # parent chain matches the rest of the path; None if absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config items back to raw text, one per line.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; an empty list when it is missing.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_section() but returns the ConfigLine objects; raises
        # ValueError for an unknown path.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Add one or more lines of configuration, optionally nested under
        the hierarchy given by *parents* (created on demand).
        """
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk/create the parent chain, indenting each level.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Instantiate the Ansible network module, preferring the legacy
    get_module() factory and falling back to NetworkModule.

    The NameError fallback matches the conditional import at the top of
    the file: only one of get_module/NetworkModule is in scope.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device's running config wrapped in CustomNetworkConfig.

    Prefers the 'config' module parameter when supplied; otherwise fetches
    from the device, handling both the old (module.get_config) and new
    (module.config.get_config) APIs via the AttributeError fallback.
    NOTE(review): the include_defaults parameter is unused here — the
    fallback reads module.params['include_defaults'] instead; presumably
    kept for call-signature compatibility.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    """Push the diff between *candidate* and the running config.

    Returns a dict with 'changed' and, when commands were generated,
    'updates'.  Honors check mode (diff computed but nothing sent) and the
    'save' parameter (copy run start), with AttributeError fallbacks for
    the old/new module APIs.
    """
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # In check mode 'changed' is still reported for the computed diff.
        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    """Send config-mode commands to the device, failing the module on any
    shell error.

    The AttributeError fallback handles the newer module API, where
    commands go through module.cli with an explicit leading 'configure'.
    """
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            # NOTE: mutates the caller's list by prepending 'configure';
            # main() strips it again after a successful run.
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
    """Get response for when transport=cli. This is kind of a hack and mainly
    needed because these modules were originally written for NX-API. And
    not every command supports "| json" when using cli/ssh. As such, we assume
    if | json returns an XML string, it is a valid command, but that the
    resource doesn't exist yet. Instead, the output will be a raw string
    when issuing commands containing 'show run'.

    FIX: the original fell through to ``return body`` after calling
    module.fail_json() on a JSON decode error, hitting an unbound local
    (``body`` was never assigned on that path) and masking the real error
    if fail_json ever returned, e.g. under a test harness.
    """
    first = response[0]
    # XML payload or a bare newline: "| json" was accepted but the
    # resource does not exist yet -> empty result.
    if 'xml' in first or first == '\n':
        return []
    # 'show run' output is plain text and is passed through untouched.
    if 'show run' in command:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                        command=command)
def execute_show(cmds, module, command_type=None):
    """Run show commands on the device and return the raw response list.

    Tries the legacy module.execute() API first; on AttributeError falls
    back to the newer module.cli API, translating the NX-API command_type
    ('cli_show'/'cli_show_ascii') into the cli output format.  Any shell
    error fails the module.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                # No explicit type: send raw and let the device decide.
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    """Run *command* and return its parsed body for either transport.

    For cli/ssh, '| json' is appended (except for 'show run') and the raw
    response is post-processed by get_cli_body_ssh(); for nxapi the
    command_type is passed straight through.
    NOTE(review): if module.params['transport'] is anything other than
    'cli' or 'nxapi', ``body`` is never assigned and the final return
    raises UnboundLocalError — presumably the transports are restricted
    upstream; confirm against the argument spec.
    """
    if module.params['transport'] == 'cli':
        if 'show run' not in command:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in, anything
    else is appended as-is."""
    flat = []
    for entry in command_lists:
        flat.extend(entry if isinstance(entry, list) else [entry])
    return flat
def get_udld_interface(module, interface):
    """Return {'mode': <'aggressive'|port status>} for *interface*, or an
    empty dict when the device output cannot be parsed.

    Aggressive mode wins over the plain port status when both are present.
    """
    command = 'show udld {0}'.format(interface)
    interface_udld = {}
    mode = None
    try:
        body = execute_show_command(command, module)[0]
        table = body['TABLE_interface']['ROW_interface']

        status = str(table.get('mib-port-status', None))
        # Note: 'mib-aggresive-mode' is NOT a typo
        agg = str(table.get('mib-aggresive-mode', 'disabled'))

        if agg == 'enabled':
            mode = 'aggressive'
        else:
            mode = status

        interface_udld['mode'] = mode

    except (KeyError, AttributeError, IndexError):
        # Unexpected/missing structure -> treat as "no UDLD info".
        interface_udld = {}

    return interface_udld
def is_interface_copper(module, interface):
    """Return True when *interface* appears as a copper port in
    'show interface status' (media type containing CU/1000/10GBaseT)."""
    copper_interfaces = []
    try:
        body = execute_show_command('show interface status', module)[0]
        rows = body['TABLE_interface']['ROW_interface']
        for row in rows:
            media = row.get('type', 'DNE')
            if 'CU' in media or '1000' in media or '10GBaseT' in media:
                copper_interfaces.append(str(row['interface'].lower()))
    except (KeyError, AttributeError):
        # Unparseable output: fall through with an empty copper list.
        pass
    return interface in copper_interfaces
def get_commands_config_udld_interface(delta, interface, module, existing):
    """Build the interface-level CLI commands that apply the UDLD mode in
    *delta*, taking the port's media type (copper vs fiber) and the
    currently configured mode into account.

    FIX: ``command`` was only assigned inside the ``if delta`` branches, so
    an empty/None *delta* (or an unrecognized mode) made the trailing
    ``if command:`` raise UnboundLocalError instead of returning no
    commands.  Initialize it to None so the function degrades gracefully.
    """
    commands = []
    command = None
    copper = is_interface_copper(module, interface)
    if delta:
        mode = delta['mode']
        if mode == 'aggressive':
            command = 'udld aggressive'
        elif copper:
            # Copper ports use 'udld enable' semantics.
            if mode == 'enabled':
                if existing['mode'] == 'aggressive':
                    command = 'no udld aggressive ; udld enable'
                else:
                    command = 'udld enable'
            elif mode == 'disabled':
                command = 'no udld enable'
        elif not copper:
            # Fiber ports use inverted 'udld disable' semantics.
            if mode == 'enabled':
                if existing['mode'] == 'aggressive':
                    command = 'no udld aggressive ; no udld disable'
                else:
                    command = 'no udld disable'
            elif mode == 'disabled':
                command = 'udld disable'
    if command:
        commands.append(command)
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def get_commands_remove_udld_interface(delta, interface, module, existing):
    """Build the interface-level CLI commands that back out the UDLD mode
    in *delta* (the inverse of get_commands_config_udld_interface),
    respecting the port's media type.

    FIX: same latent bug as the config variant — ``command`` was only
    assigned inside ``if delta``, so an empty *delta* raised
    UnboundLocalError at ``if command:``.  Initialize it to None.
    """
    commands = []
    command = None
    copper = is_interface_copper(module, interface)
    if delta:
        mode = delta['mode']
        if mode == 'aggressive':
            command = 'no udld aggressive'
        elif copper:
            if mode == 'enabled':
                command = 'no udld enable'
            elif mode == 'disabled':
                command = 'udld enable'
        elif not copper:
            if mode == 'enabled':
                command = 'udld disable'
            elif mode == 'disabled':
                command = 'no udld disable'
    if command:
        commands.append(command)
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def main():
    """Module entry point: diff proposed vs existing UDLD mode and apply
    (state=present) or remove (state=absent) the difference."""
    argument_spec = dict(
        mode=dict(choices=['enabled', 'disabled', 'aggressive'],
                  required=True),
        interface=dict(type='str', required=True),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    interface = module.params['interface'].lower()
    mode = module.params['mode']
    state = module.params['state']

    proposed = dict(mode=mode)
    existing = get_udld_interface(module, interface)
    end_state = existing

    # dict.iteritems() is Python 2 only.
    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))

    changed = False
    commands = []
    if state == 'present':
        if delta:
            command = get_commands_config_udld_interface(delta, interface,
                                                         module, existing)
            commands.append(command)
    elif state == 'absent':
        # Only remove settings that are actually configured.
        common = set(proposed.iteritems()).intersection(existing.iteritems())
        if common:
            command = get_commands_remove_udld_interface(
                dict(common), interface, module, existing
            )
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_udld_interface(module, interface)
            # execute_config_command may have prepended 'configure';
            # strip it from the reported updates.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    module.exit_json(**results)
if __name__ == '__main__':
main()
| gundalow/ansible-modules-core | network/nxos/nxos_udld_interface.py | Python | gpl-3.0 | 15,900 |
def foo(someParam: int):
pass
someParam = 1
foo(<caret>) | jwren/intellij-community | python/testData/codeInsight/mlcompletion/receiverMatchesSimple.py | Python | apache-2.0 | 59 |
from .m1 import f
from p1.m1 import f
from m1 import f
from a import g
<warning descr="Unused import statement 'from a import h'">from a import h</warning>
__all__ = ['f', 'g']
| jwren/intellij-community | python/testData/inspections/PyUnresolvedReferencesInspection/UnusedImportsInPackage/p1/__init__.py | Python | apache-2.0 | 178 |
import re
from django import forms
from django.conf import settings
from django.template.defaultfilters import slugify
from tower import ugettext_lazy as _lazy
from kitsune.products.models import Product, Topic
from kitsune.sumo.form_fields import MultiUsernameField, StrippedCharField
from kitsune.wiki.config import SIGNIFICANCES, CATEGORIES
from kitsune.wiki.models import (
Document, Revision, MAX_REVISION_COMMENT_LENGTH)
from kitsune.wiki.tasks import add_short_links
from kitsune.wiki.widgets import (
RadioFieldRendererWithHelpText, ProductTopicsAndSubtopicsWidget,
RelatedDocumentsWidget)
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
PRODUCT_REQUIRED = _lazy(u'Please select at least one product.')
TOPIC_REQUIRED = _lazy(u'Please select at least one topic.')
class DocumentForm(forms.ModelForm):
    """Form to create/edit a document."""

    def __init__(self, *args, **kwargs):
        # Quasi-kwargs:
        can_archive = kwargs.pop('can_archive', False)
        can_edit_needs_change = kwargs.pop('can_edit_needs_change', False)
        initial_title = kwargs.pop('initial_title', '')

        super(DocumentForm, self).__init__(*args, **kwargs)

        # Pre-fill title and derive the initial slug from it.
        title_field = self.fields['title']
        title_field.initial = initial_title

        slug_field = self.fields['slug']
        slug_field.initial = slugify(initial_title)

        # Choice lists are built at form-instantiation time so they pick
        # up newly added topics/products/documents.
        topics_field = self.fields['topics']
        topics_field.choices = Topic.objects.values_list('id', 'title')

        products_field = self.fields['products']
        products_field.choices = Product.objects.values_list('id', 'title')

        related_documents_field = self.fields['related_documents']
        related_documents_field.choices = Document.objects.values_list('id', 'title')

        # If user hasn't permission to frob is_archived, remove the field. This
        # causes save() to skip it as well.
        if not can_archive:
            del self.fields['is_archived']

        # If user hasn't permission to mess with needs_change*, remove the
        # fields. This causes save() to skip it as well.
        if not can_edit_needs_change:
            del self.fields['needs_change']
            del self.fields['needs_change_comment']

    title = StrippedCharField(
        min_length=5, max_length=255,
        widget=forms.TextInput(),
        label=_lazy(u'Title:'),
        help_text=_lazy(u'Title of article'),
        error_messages={'required': TITLE_REQUIRED,
                        'min_length': TITLE_SHORT,
                        'max_length': TITLE_LONG})

    # We don't use forms.SlugField because it is too strict in
    # what it allows (English/Roman alpha-numeric characters and dashes).
    # Instead, we do custom validation in `clean_slug` below.
    slug = StrippedCharField(
        min_length=3, max_length=255,
        widget=forms.TextInput(),
        label=_lazy(u'Slug:'),
        help_text=_lazy(u'Article URL'),
        error_messages={'required': SLUG_REQUIRED,
                        'min_length': SLUG_SHORT,
                        'max_length': SLUG_LONG})

    products = forms.MultipleChoiceField(
        label=_lazy(u'Relevant to:'),
        required=False,
        widget=forms.CheckboxSelectMultiple())

    is_localizable = forms.BooleanField(
        initial=True,
        label=_lazy(u'Allow translations:'),
        required=False)

    is_archived = forms.BooleanField(
        label=_lazy(u'Obsolete:'),
        required=False)

    allow_discussion = forms.BooleanField(
        label=_lazy(u'Allow discussion on this article?'),
        initial=True,
        required=False)

    category = forms.ChoiceField(
        choices=CATEGORIES,
        # Required for non-translations, which is
        # enforced in Document.clean().
        required=False,
        label=_lazy(u'Category:'),
        help_text=_lazy(u'Type of article'))

    topics = forms.MultipleChoiceField(
        label=_lazy(u'Topics:'),
        required=False,
        widget=ProductTopicsAndSubtopicsWidget())

    related_documents = forms.MultipleChoiceField(
        label=_lazy(u'Related documents:'),
        required=False,
        widget=RelatedDocumentsWidget())

    locale = forms.CharField(widget=forms.HiddenInput())

    needs_change = forms.BooleanField(
        label=_lazy(u'Needs change:'),
        initial=False,
        required=False)

    needs_change_comment = forms.CharField(
        label=_lazy(u'Comment:'),
        widget=forms.Textarea(),
        required=False)

    def clean_slug(self):
        """Reject slugs containing URL-breaking characters."""
        slug = self.cleaned_data['slug']
        # Blacklist /, ?, % and +,
        if not re.compile(r'^[^/^\+^\?%]+$').match(slug):
            raise forms.ValidationError(SLUG_INVALID)
        return slug

    def clean(self):
        """Cross-field validation: default-locale documents must carry at
        least one product and one topic."""
        c = super(DocumentForm, self).clean()
        locale = c.get('locale')

        # Products are required for en-US
        products = c.get('products')
        if (locale == settings.WIKI_DEFAULT_LANGUAGE and
            (not products or len(products) < 1)):
            raise forms.ValidationError(PRODUCT_REQUIRED)

        # Topics are required for en-US
        topics = c.get('topics')
        if (locale == settings.WIKI_DEFAULT_LANGUAGE and
            (not topics or len(topics) < 1)):
            raise forms.ValidationError(TOPIC_REQUIRED)

        return c

    class Meta:
        model = Document
        fields = ('title', 'slug', 'category', 'is_localizable', 'products',
                  'topics', 'locale', 'is_archived', 'allow_discussion',
                  'needs_change', 'needs_change_comment', 'related_documents')

    def save(self, parent_doc, **kwargs):
        """Persist the Document form, and return the saved Document."""
        doc = super(DocumentForm, self).save(commit=False, **kwargs)
        doc.parent = parent_doc

        # If document doesn't need change, clear out the comment.
        if not doc.needs_change:
            doc.needs_change_comment = ''

        # Create the share link if it doesn't exist and is in
        # a category it should show for.
        doc.save()
        if (doc.category in settings.IA_DEFAULT_CATEGORIES
            and not doc.share_link):
            # This operates under the constraints of passing in a list.
            add_short_links.delay([doc.pk])

        self.save_m2m()

        if parent_doc:
            # Products are not set on translations.
            doc.products.remove(*[p for p in doc.products.all()])

        return doc
class RevisionForm(forms.ModelForm):
    """Form to create new revisions."""
    keywords = StrippedCharField(required=False,
                                 label=_lazy(u'Keywords:'),
                                 help_text=_lazy(u'Affects search results'))

    summary = StrippedCharField(
        min_length=5, max_length=1000, widget=forms.Textarea(),
        label=_lazy(u'Search result summary:'),
        help_text=_lazy(u'Only displayed on search results page'),
        error_messages={'required': SUMMARY_REQUIRED,
                        'min_length': SUMMARY_SHORT,
                        'max_length': SUMMARY_LONG})

    content = StrippedCharField(
        min_length=5, max_length=100000,
        label=_lazy(u'Content:'),
        widget=forms.Textarea(),
        error_messages={'required': CONTENT_REQUIRED,
                        'min_length': CONTENT_SHORT,
                        'max_length': CONTENT_LONG})

    expires = forms.DateField(
        label=_lazy(u'Expiry date:'),
        required=False)

    comment = StrippedCharField(required=False, label=_lazy(u'Comment:'))

    class Meta(object):
        model = Revision
        fields = ('keywords', 'summary', 'content', 'comment', 'based_on',
                  'expires')

    def __init__(self, *args, **kwargs):
        super(RevisionForm, self).__init__(*args, **kwargs)
        # based_on is chosen programmatically, never by the user directly.
        self.fields['based_on'].widget = forms.HiddenInput()
        self.fields['comment'].widget = forms.TextInput(
            attrs={'maxlength': MAX_REVISION_COMMENT_LENGTH})

    def save(self, creator, document, based_on_id=None, base_rev=None,
             **kwargs):
        """Persist me, and return the saved Revision.

        Take several other necessary pieces of data that aren't from the
        form.
        """
        # Throws a TypeError if somebody passes in a commit kwarg:
        new_rev = super(RevisionForm, self).save(commit=False, **kwargs)
        new_rev.document = document
        new_rev.creator = creator

        if based_on_id:
            new_rev.based_on_id = based_on_id

        # If the document doesn't allow the revision creator to edit the
        # keywords, keep the old value.
        if base_rev and not document.allows(creator, 'edit_keywords'):
            new_rev.keywords = base_rev.keywords

        new_rev.save()
        return new_rev
class ReviewForm(forms.Form):
    """Form used when reviewing (approving/rejecting) a revision."""
    comment = StrippedCharField(max_length=2000, widget=forms.Textarea(),
                                required=False, label=_lazy(u'Comment:'),
                                error_messages={'max_length': COMMENT_LONG})

    # Radio widget that renders per-choice help text for significance.
    _widget = forms.RadioSelect(renderer=RadioFieldRendererWithHelpText)
    significance = forms.TypedChoiceField(
        label=_lazy(u'Significance:'),
        choices=SIGNIFICANCES,
        # Default to the second-defined significance level.
        initial=SIGNIFICANCES[1][0],
        required=False, widget=_widget,
        coerce=int, empty_value=SIGNIFICANCES[1][0])

    is_ready_for_localization = forms.BooleanField(
        initial=False,
        label=_lazy(u'Ready for localization'),
        required=False)

    needs_change = forms.BooleanField(
        label=_lazy(u'Needs change'),
        initial=False,
        required=False)

    needs_change_comment = forms.CharField(
        label=_lazy(u'Comment:'),
        widget=forms.Textarea(),
        required=False)
class AddContributorForm(forms.Form):
    """Form to add contributors to a document."""
    # Accepts a comma-separated list of usernames, with autocomplete.
    users = MultiUsernameField(
        widget=forms.TextInput(attrs={'placeholder': _lazy(u'username'),
                                      'class': 'user-autocomplete'}))
languages = [('', 'Any')] + [(l[0], u'{1} ({0})'.format(*l))
for l in settings.LANGUAGE_CHOICES]
class RevisionFilterForm(forms.Form):
    """Form to filter a list of revisions."""
    # 'languages' is the module-level list of locale choices defined above.
    locale = forms.ChoiceField(label=_lazy(u'Locale:'), choices=languages,
                               required=False)
    users = MultiUsernameField(label=_lazy(u'Users:'), required=False)
    # Date range bounds for filtering revisions.
    start = forms.DateField(label=_lazy(u'Start:'), required=False)
    end = forms.DateField(label=_lazy(u'End:'), required=False)
| YOTOV-LIMITED/kitsune | kitsune/wiki/forms.py | Python | bsd-3-clause | 12,418 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from thumbor.filters import BaseFilter, filter_method
class Filter(BaseFilter):
    """Thumbor filter that strips the embedded ICC color profile."""

    @filter_method()
    def strip_icc(self):
        # Delegates to the active imaging engine's ICC-stripping routine.
        self.engine.strip_icc()
| food52/thumbor | thumbor/filters/strip_icc.py | Python | mit | 413 |
#
# Copyright (c) 2014 Juniper Networks, Inc.
#
import setuptools
def requirements(filename):
    """Return the lines of *filename* as a list of requirement strings
    (one requirement per line, trailing newlines stripped)."""
    with open(filename) as handle:
        return handle.read().splitlines()
setuptools.setup(
    name='contrail-vrouter-api',
    version='1.0',
    packages=setuptools.find_packages(),

    # metadata
    author="OpenContrail",
    author_email="dev@lists.opencontrail.org",
    license="Apache Software License",
    url="http://www.opencontrail.org/",

    # Runtime and test dependencies are read from the pip-style
    # requirements files alongside this script.
    install_requires=requirements('requirements.txt'),

    test_suite='contrail_vrouter_api.tests',
    tests_require=requirements('test-requirements.txt'),
)
| facetothefate/contrail-controller | src/vnsw/contrail-vrouter-api/setup.py | Python | apache-2.0 | 624 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A handler that displays servers and their instances."""
from google.appengine.tools.devappserver2.admin import admin_request_handler
class ModulesHandler(admin_request_handler.AdminRequestHandler):
  """Renders the admin page listing the known modules."""

  def get(self):
    # self.dispatcher and self.render come from AdminRequestHandler;
    # the module list feeds the 'modules.html' template.
    values = {'modules': self.dispatcher.modules}
    self.response.write(self.render('modules.html', values))
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/admin/modules_handler.py | Python | bsd-3-clause | 934 |
# -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
    """Default template context processor.  Injects ``g`` from the
    application context and ``request``/``session`` from the request
    context, when those contexts are pushed.
    """
    app_ctx = _app_ctx_stack.top
    req_ctx = _request_ctx_stack.top
    context = {}
    if app_ctx is not None:
        context['g'] = app_ctx.g
    if req_ctx is not None:
        context['request'] = req_ctx.request
        context['session'] = req_ctx.session
    return context
class Environment(BaseEnvironment):
    """Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprint works so that it can prepend the
    name of the blueprint to referenced templates if necessary.
    """
    def __init__(self, app, **options):
        # Only build the app's global loader when the caller did not pass
        # one; checking membership first avoids creating a loader that
        # would immediately be discarded.
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        BaseEnvironment.__init__(self, **options)
        self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """
    def __init__(self, app):
        # Flask application whose (app and blueprint) loaders we dispatch to.
        self.app = app
    def get_source(self, environment, template):
        """Return the source of *template* from the first loader that has
        it, trying candidates in precedence order; raises
        `TemplateNotFound` when no loader matches.
        """
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(template)
    def _iter_loaders(self, template):
        """Yield ``(loader, template_name)`` pairs to try, in precedence
        order: the application's own loader first, then (for old-style
        module blueprints referenced as ``module/name``) the module's
        loader with the prefix stripped, then the loaders of all
        new-style blueprints with the untouched template name.
        """
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template
        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # ValueError: no '/' in the name; KeyError: the prefix does
            # not name a registered blueprint.  Either way, not a module
            # template reference.
            pass
        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template
    def list_templates(self):
        """Return an (unordered) list of all template names visible
        through the app and blueprint loaders; templates from module
        blueprints are prefixed with the blueprint name.
        """
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.list_templates())
        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.list_templates():
                    prefix = ''
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)
        return list(result)
def _render(template, context, app):
    """Render *template* with *context*, fire the ``template_rendered``
    signal on behalf of *app*, and return the rendered string.
    """
    rendered = template.render(context)
    template_rendered.send(app, template=template, context=context)
    return rendered
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    app_ctx = _app_ctx_stack.top
    app_ctx.app.update_template_context(context)
    template = app_ctx.app.jinja_env.get_or_select_template(
        template_name_or_list)
    return _render(template, context, app_ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the sourcecode of the template to be
                   rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    app_ctx = _app_ctx_stack.top
    app_ctx.app.update_template_context(context)
    template = app_ctx.app.jinja_env.from_string(source)
    return _render(template, context, app_ctx.app)
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/flask/templating.py | Python | apache-2.0 | 4,707 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/EquippedBadgeSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor; serialized_pb is the wire-encoded
# FileDescriptorProto emitted by protoc.  Generated code -- do not edit
# by hand; regenerate from the .proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Settings/Master/EquippedBadgeSettings.proto',
  package='POGOProtos.Settings.Master',
  syntax='proto3',
  serialized_pb=_b('\n6POGOProtos/Settings/Master/EquippedBadgeSettings.proto\x12\x1aPOGOProtos.Settings.Master\"y\n\x15\x45quippedBadgeSettings\x12\x1f\n\x17\x65quip_badge_cooldown_ms\x18\x01 \x01(\x03\x12\x1f\n\x17\x63\x61tch_probability_bonus\x18\x02 \x03(\x02\x12\x1e\n\x16\x66lee_probability_bonus\x18\x03 \x03(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor: one int64 scalar plus two repeated float fields.
_EQUIPPEDBADGESETTINGS = _descriptor.Descriptor(
  name='EquippedBadgeSettings',
  full_name='POGOProtos.Settings.Master.EquippedBadgeSettings',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='equip_badge_cooldown_ms', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.equip_badge_cooldown_ms', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='catch_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.catch_probability_bonus', index=1,
      number=2, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='flee_probability_bonus', full_name='POGOProtos.Settings.Master.EquippedBadgeSettings.flee_probability_bonus', index=2,
      number=3, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=86,
  serialized_end=207,
)
DESCRIPTOR.message_types_by_name['EquippedBadgeSettings'] = _EQUIPPEDBADGESETTINGS
# Concrete message class materialized from the descriptor above.
EquippedBadgeSettings = _reflection.GeneratedProtocolMessageType('EquippedBadgeSettings', (_message.Message,), dict(
  DESCRIPTOR = _EQUIPPEDBADGESETTINGS,
  __module__ = 'POGOProtos.Settings.Master.EquippedBadgeSettings_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.EquippedBadgeSettings)
  ))
_sym_db.RegisterMessage(EquippedBadgeSettings)
# @@protoc_insertion_point(module_scope)
| favll/pogom | pogom/pgoapi/protos/POGOProtos/Settings/Master/EquippedBadgeSettings_pb2.py | Python | mit | 3,197 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class IrFilters(models.Model):
    """User-defined saved search filters (domain + context + sort) that can
    be private or shared, optionally scoped to a menu action, and marked
    as the default filter for a model."""
    _name = 'ir.filters'
    _description = 'Filters'
    _order = 'model_id, name, id desc'
    name = fields.Char(string='Filter Name', translate=True, required=True)
    user_id = fields.Many2one('res.users', string='User', ondelete='cascade', default=lambda self: self._uid,
                              help="The user this filter is private to. When left empty the filter is public "
                                   "and available to all users.")
    domain = fields.Text(default='[]', required=True)
    context = fields.Text(default='{}', required=True)
    sort = fields.Text(default='[]', required=True)
    model_id = fields.Selection(selection='_list_all_models', string='Model', required=True)
    is_default = fields.Boolean(string='Default filter')
    action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade',
                                help="The menu action this filter applies to. "
                                     "When left empty the filter applies to all menus "
                                     "for this model.")
    active = fields.Boolean(default=True)
    @api.model
    def _list_all_models(self):
        """Return ``(model, name)`` pairs for every installed model; used as
        the selection values of ``model_id``."""
        self._cr.execute("SELECT model, name FROM ir_model ORDER BY name")
        return self._cr.fetchall()
    @api.multi
    def copy(self, default=None):
        """Duplicate the filter, suffixing its name with " (copy)" to keep
        the (name, model, user, action) uniqueness constraint satisfied."""
        self.ensure_one()
        default = dict(default or {}, name=_('%s (copy)') % self.name)
        return super(IrFilters, self).copy(default)
    @api.multi
    def _get_eval_domain(self):
        """Parse the stored ``domain`` text into a Python value using
        :func:`ast.literal_eval` (safe: no arbitrary code execution)."""
        self.ensure_one()
        return ast.literal_eval(self.domain)
    @api.model
    def _get_action_domain(self, action_id=None):
        """Return a domain component for matching filters that are visible in the
        same context (menu/view) as the given action."""
        if action_id:
            # filters specific to this menu + global ones
            return [('action_id', 'in', [action_id, False])]
        # only global ones
        return [('action_id', '=', False)]
    @api.model
    def get_filters(self, model, action_id=None):
        """Obtain the list of filters available for the user on the given model.
        :param action_id: optional ID of action to restrict filters to this action
            plus global filters. If missing only global filters are returned.
            The action does not have to correspond to the model, it may only be
            a contextual action.
        :return: list of :meth:`~osv.read`-like dicts containing the
            ``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
            ``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
        """
        # available filters: private filters (user_id=uid) and public filters (uid=NULL),
        # and filters for the action (action_id=action_id) or global (action_id=NULL)
        action_domain = self._get_action_domain(action_id)
        filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])])
        user_context = self.env.user.context_get()
        return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort'])
    @api.model
    def _check_global_default(self, vals, matching_filters):
        """ _check_global_default(dict, list(dict)) -> None
        Checks if there is a global default for the model_id requested.
        If there is, and the default is different than the record being written
        (-> we're not updating the current global default), raise an error
        to avoid users unknowingly overwriting existing global defaults (they
        have to explicitly remove the current default before setting a new one)
        This method should only be called if ``vals`` is trying to set
        ``is_default``
        :raises odoo.exceptions.UserError: if there is an existing default and
            we're not updating it
        """
        domain = self._get_action_domain(vals.get('action_id'))
        defaults = self.search(domain + [
            ('model_id', '=', vals['model_id']),
            ('user_id', '=', False),
            ('is_default', '=', True),
        ])
        if not defaults:
            return
        if matching_filters and (matching_filters[0]['id'] == defaults.id):
            return
        raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')})
    @api.model
    @api.returns('self', lambda value: value.id)
    def create_or_replace(self, vals):
        """Create a filter from ``vals``, or overwrite the existing filter
        with the same (name, model, user) triple.  Also maintains default
        flags: a new user default unsets the user's previous default, and
        a new global default is refused if one already exists."""
        action_id = vals.get('action_id')
        current_filters = self.get_filters(vals['model_id'], action_id)
        matching_filters = [f for f in current_filters
                            if f['name'].lower() == vals['name'].lower()
                            # next line looks for matching user_ids (specific or global), i.e.
                            # f.user_id is False and vals.user_id is False or missing,
                            # or f.user_id.id == vals.user_id
                            if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')]
        if vals.get('is_default'):
            if vals.get('user_id'):
                # Setting new default: any other default that belongs to the user
                # should be turned off
                domain = self._get_action_domain(action_id)
                defaults = self.search(domain + [
                    ('model_id', '=', vals['model_id']),
                    ('user_id', '=', vals['user_id']),
                    ('is_default', '=', True),
                ])
                if defaults:
                    defaults.write({'is_default': False})
            else:
                self._check_global_default(vals, matching_filters)
        # When a filter exists for the same (name, model, user) triple, we simply
        # replace its definition (considering action_id irrelevant here)
        if matching_filters:
            matching_filter = self.browse(matching_filters[0]['id'])
            matching_filter.write(vals)
            return matching_filter
        return self.create(vals)
    _sql_constraints = [
        # Partial constraint, complemented by unique index (see below). Still
        # useful to keep because it provides a proper error message when a
        # violation occurs, as it shares the same prefix as the unique index.
        ('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
    ]
    @api.model_cr_context
    def _auto_init(self):
        """Extend schema init to enforce case-insensitive name uniqueness
        via a functional unique index (not expressible as a constraint)."""
        result = super(IrFilters, self)._auto_init()
        # Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
        self._cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index")  # drop old index w/o action
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'")
        if not self._cr.fetchone():
            self._cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters
                                (lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""")
        return result
| chienlieu2017/it_management | odoo/odoo/addons/base/ir/ir_filters.py | Python | gpl-3.0 | 7,581 |
"""bug 867387 Bixie draft schema
Revision ID: 22e4e60e03f
Revises: 37004fc6e41e
Create Date: 2013-05-10 13:20:35.750954
"""
# revision identifiers, used by Alembic.
revision = '22e4e60e03f'
down_revision = '37004fc6e41e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
import sqlalchemy.types as types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    """Pass-through SQLAlchemy type for PostgreSQL's CITEXT column type."""
    name = 'citext'

    def get_col_spec(self):
        """Column type name used in DDL."""
        return 'CITEXT'

    def bind_processor(self, dialect):
        """Values are sent to the database unchanged."""
        return lambda value: value

    def result_processor(self, dialect, coltype):
        """Values are read back from the database unchanged."""
        return lambda value: value

    def __repr__(self):
        return "citext"
class JSON(types.UserDefinedType):
    """Pass-through SQLAlchemy type for PostgreSQL's JSON column type."""
    name = 'json'

    def get_col_spec(self):
        """Column type name used in DDL."""
        return 'JSON'

    def bind_processor(self, dialect):
        """Values are sent to the database unchanged."""
        return lambda value: value

    def result_processor(self, dialect, coltype):
        """Values are read back from the database unchanged."""
        return lambda value: value

    def __repr__(self):
        return "json"
def upgrade():
    """Create the ``bixie`` schema and all of its tables (lookup tables,
    crash storage, ADI aggregates, and the join tables that reference
    them)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.execute(u'CREATE SCHEMA bixie')
    op.create_table(u'product_versions',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', CITEXT(), nullable=True),
    sa.Column(u'release_version', sa.TEXT(), nullable=True),
    sa.Column(u'major_version', sa.TEXT(), nullable=True),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'full_urls',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'url', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'raw_product_releases',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'version', sa.TEXT(), nullable=False),
    sa.Column(u'build', sa.TEXT(), nullable=False),
    sa.Column(u'build_type', CITEXT(), nullable=False),
    sa.Column(u'platform', sa.TEXT(), nullable=False),
    sa.Column(u'product_name', CITEXT(), nullable=False),
    sa.Column(u'repository', sa.TEXT(), nullable=False),
    sa.Column(u'stability', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'products',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'crashes_normalized',
    sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
              nullable=False),
    sa.Column(u'signature_id', sa.TEXT(), nullable=False),
    sa.Column(u'error_message_id', JSON(), nullable=False),
    sa.Column(u'product_id', sa.TEXT(), nullable=True),
    sa.Column(u'user_agent_id', sa.TEXT(), nullable=True),
    sa.PrimaryKeyConstraint(u'crash_id'),
    schema=u'bixie'
    )
    op.create_table(u'hosts',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'signatures',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'signature', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'crashes',
    sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
              nullable=False),
    sa.Column(u'signature', sa.TEXT(), nullable=False),
    sa.Column(u'error', JSON(), nullable=False),
    sa.Column(u'product', sa.TEXT(), nullable=True),
    sa.Column(u'protocol', sa.TEXT(), nullable=True),
    sa.Column(u'hostname', sa.TEXT(), nullable=True),
    sa.Column(u'username', sa.TEXT(), nullable=True),
    sa.Column(u'port', sa.TEXT(), nullable=True),
    sa.Column(u'path', sa.TEXT(), nullable=True),
    sa.Column(u'query', sa.TEXT(), nullable=True),
    sa.Column(u'full_url', sa.TEXT(), nullable=True),
    sa.Column(u'user_agent', sa.TEXT(), nullable=True),
    sa.Column(u'success', sa.BOOLEAN(), nullable=True),
    sa.Column(u'client_crash_datetime',
              postgresql.TIMESTAMP(timezone=True), nullable=True),
    sa.Column(u'client_submitted_datetime',
              postgresql.TIMESTAMP(timezone=True), nullable=True),
    sa.Column(u'processor_started_datetime',
              postgresql.TIMESTAMP(timezone=True), nullable=True),
    sa.Column(u'processor_completed_datetime',
              postgresql.TIMESTAMP(timezone=True), nullable=True),
    sa.PrimaryKeyConstraint(u'crash_id'),
    schema=u'bixie'
    )
    op.create_table(u'release_channels',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', CITEXT(), nullable=False),
    sa.Column(u'sort', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'os_names',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'error_messages',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'error_message', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'product_version_adi',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'product_version_id', sa.INTEGER(), nullable=False),
    sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
    sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
    sa.Column(u'os_name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    # raw_adi intentionally has no primary key: it is a bulk-load staging
    # table.
    op.create_table(u'raw_adi',
    sa.Column(u'adi_count', sa.BIGINT(), nullable=True),
    sa.Column(u'date', sa.DATE(), nullable=True),
    sa.Column(u'product_name', sa.TEXT(), nullable=True),
    sa.Column(u'product_os_platform', sa.TEXT(), nullable=True),
    sa.Column(u'product_os_version', sa.TEXT(), nullable=True),
    sa.Column(u'product_version', sa.TEXT(), nullable=True),
    sa.Column(u'build', sa.TEXT(), nullable=True),
    sa.Column(u'build_channel', sa.TEXT(), nullable=True),
    sa.Column(u'product_guid', sa.TEXT(), nullable=True),
    sa.Column(u'received_at', postgresql.TIMESTAMP(timezone=True),
              nullable=True),
    sa.PrimaryKeyConstraint(),
    schema=u'bixie'
    )
    op.create_table(u'users',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'product_adi',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'product_id', sa.INTEGER(), nullable=False),
    sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
    sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
    sa.Column(u'os_name', CITEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    # Tables below carry foreign keys into the tables created above, so
    # they must be created last.
    op.create_table(u'user_agents',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
    sa.ForeignKeyConstraint(['error_message_id'],
                            [u'bixie.error_messages.id'], ),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'product_users',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'product_id', sa.INTEGER(), nullable=True),
    sa.Column(u'user_id', sa.INTEGER(), nullable=True),
    sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
    sa.ForeignKeyConstraint(['user_id'], [u'bixie.users.id'], ),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'product_release_channels',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'release_channel_id', sa.INTEGER(), nullable=True),
    sa.Column(u'product_id', sa.INTEGER(), nullable=True),
    sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
    sa.ForeignKeyConstraint(['release_channel_id'],
                            [u'bixie.release_channels.id'], ),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    op.create_table(u'error_message_products',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
    sa.Column(u'product_id', sa.INTEGER(), nullable=True),
    sa.ForeignKeyConstraint(['error_message_id'],
                            [u'bixie.error_messages.id'], ),
    sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
    ### end Alembic commands ###
def downgrade():
    """Drop all bixie tables -- foreign-key holders first, then their
    referents -- and finally the schema itself."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table(u'error_message_products', schema=u'bixie')
    op.drop_table(u'product_release_channels', schema=u'bixie')
    op.drop_table(u'product_users', schema=u'bixie')
    op.drop_table(u'user_agents', schema=u'bixie')
    op.drop_table(u'product_adi', schema=u'bixie')
    op.drop_table(u'users', schema=u'bixie')
    op.drop_table(u'raw_adi', schema=u'bixie')
    op.drop_table(u'product_version_adi', schema=u'bixie')
    op.drop_table(u'error_messages', schema=u'bixie')
    op.drop_table(u'os_names', schema=u'bixie')
    op.drop_table(u'release_channels', schema=u'bixie')
    op.drop_table(u'crashes', schema=u'bixie')
    op.drop_table(u'signatures', schema=u'bixie')
    op.drop_table(u'hosts', schema=u'bixie')
    op.drop_table(u'crashes_normalized', schema=u'bixie')
    op.drop_table(u'products', schema=u'bixie')
    op.drop_table(u'raw_product_releases', schema=u'bixie')
    op.drop_table(u'full_urls', schema=u'bixie')
    op.drop_table(u'product_versions', schema=u'bixie')
    op.execute(u'DROP SCHEMA bixie')
    ### end Alembic commands ###
| KaiRo-at/socorro | alembic/versions/22e4e60e03f_bug_867387_bixie_dra.py | Python | mpl-2.0 | 9,842 |
from __future__ import absolute_import
from itertools import cycle
import logging
import random
import six
from six.moves import xrange
from .base import Producer
log = logging.getLogger(__name__)
class SimpleProducer(Producer):
    """A simple, round-robin producer.

    Each batch of messages is published to the next partition in the
    topic's cycle.

    See Producer class for Base Arguments

    Additional Arguments:
        random_start (bool, optional): randomize the initial partition which
            the first message block will be published to, otherwise
            if false, the first message block will always publish
            to partition 0 before cycling through each partition,
            defaults to True.
    """
    def __init__(self, *args, **kwargs):
        # topic -> endless iterator cycling over the topic's partition ids
        self.partition_cycles = {}
        self.random_start = kwargs.pop('random_start', True)
        super(SimpleProducer, self).__init__(*args, **kwargs)

    def _next_partition(self, topic):
        """Return the next partition id for *topic*, loading topic
        metadata lazily on first use."""
        if topic not in self.partition_cycles:
            if not self.client.has_metadata_for_topic(topic):
                self.client.load_metadata_for_topics(topic)
            self.partition_cycles[topic] = cycle(self.client.get_partition_ids_for_topic(topic))

            # Randomize the initial partition that is returned
            if self.random_start:
                num_partitions = len(self.client.get_partition_ids_for_topic(topic))
                for _ in xrange(random.randint(0, num_partitions - 1)):
                    next(self.partition_cycles[topic])

        return next(self.partition_cycles[topic])

    def send_messages(self, topic, *msg):
        """Publish *msg* payloads to *topic* on the next round-robin
        partition.  Topic names are normalized to bytes for the wire
        protocol."""
        if not isinstance(topic, six.binary_type):
            topic = topic.encode('utf-8')

        partition = self._next_partition(topic)
        return super(SimpleProducer, self).send_messages(
            topic, partition, *msg
        )

    def __repr__(self):
        # BUGFIX: 'async' became a reserved keyword in Python 3.7, so the
        # literal attribute access `self.async` is a SyntaxError there.
        # The attribute (set by the Producer base class) is read via
        # getattr() to keep this module importable on modern Pythons.
        return '<SimpleProducer batch=%s>' % getattr(self, 'async')
| ianawilson/kafka-python | kafka/producer/simple.py | Python | apache-2.0 | 1,893 |
from setuptools import setup
# Single-sourced version string; the 'a0' suffix marks an alpha pre-release.
version = '1.5.0a0'
# Optional dependency groups, installable as WebOb[testing] / WebOb[docs].
testing_extras = ['nose', 'coverage']
docs_extras = ['Sphinx']
setup(
    name='WebOb',
    version=version,
    description="WSGI request and response object",
    long_description="""\
WebOb provides wrappers around the WSGI request environment, and an
object to help create WSGI responses.
The objects map much of the specified behavior of HTTP, including
header parsing and accessors for other standard parts of the
environment.
You may install the `in-development version of WebOb
<https://github.com/Pylons/webob/zipball/master#egg=WebOb-dev>`_ with
``pip install WebOb==dev`` (or ``easy_install WebOb==dev``).
* `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_
* `Bug tracker <https://github.com/Pylons/webob/issues>`_
* `Browse source code <https://github.com/Pylons/webob>`_
* `Mailing list <http://bit.ly/paste-users>`_
* `Release news <http://docs.webob.org/en/latest/news.html>`_
* `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_
""",
    classifiers=[
        "Development Status :: 6 - Mature",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    keywords='wsgi request web http',
    author='Ian Bicking',
    author_email='ianb@colorstudy.com',
    maintainer='Pylons Project',
    url='http://webob.org/',
    license='MIT',
    packages=['webob'],
    zip_safe=True,
    test_suite='nose.collector',
    tests_require=['nose'],
    extras_require = {
        'testing':testing_extras,
        'docs':docs_extras,
    },
)
| endlessm/chromium-browser | third_party/catapult/third_party/WebOb/setup.py | Python | bsd-3-clause | 2,154 |
# test re.sub with unmatched groups, behaviour changed in CPython 3.5
# Prefer MicroPython's ure module; fall back to CPython's re.  Printing
# "SKIP" and raising SystemExit is the test harness's skip protocol.
try:
    import ure as re
except ImportError:
    try:
        import re
    except ImportError:
        print("SKIP")
        raise SystemExit
# Some ure builds are compiled without sub(); skip on those too.
try:
    re.sub
except AttributeError:
    print("SKIP")
    raise SystemExit
# first group matches, second optional group doesn't so is replaced with a blank
print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))
| kerneltask/micropython | tests/extmod/ure_sub_unmatched.py | Python | mit | 419 |
#!/usr/bin/env python
# Django management command entry point (runserver, migrate, test, ...).
import os
import sys
if __name__ == "__main__":
    # NOTE(review): "travis-ci" contains a hyphen, which is not a valid
    # Python module name for normal import machinery -- verify this
    # settings path actually resolves in this project's layout.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travis-ci.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| uw-it-aca/mdot-developers | travis-ci/manage.py | Python | apache-2.0 | 252 |
from ...scheme import Scheme
from ..schemeinfo import SchemeInfoDialog
from ...gui import test
class TestSchemeInfo(test.QAppTestCase):
    def test_scheme_info(self):
        """Open the scheme-info dialog for a scheme and, if the dialog is
        accepted, verify the editor widgets round-trip the scheme's
        title and description.
        """
        scheme = Scheme(title="A Scheme", description="A String\n")
        dialog = SchemeInfoDialog()
        dialog.setScheme(scheme)
        # NOTE(review): exec_() blocks on a modal dialog, so this test
        # appears to require manual interaction to proceed -- confirm
        # that is intentional for this suite.
        status = dialog.exec_()
        if status == dialog.Accepted:
            self.assertEqual(scheme.title.strip(),
                             str(dialog.editor.name_edit.text()).strip())
            self.assertEqual(scheme.description,
                             str(dialog.editor.desc_edit \
                                 .toPlainText()).strip())
| cheral/orange3 | Orange/canvas/application/tests/test_schemeinfo.py | Python | bsd-2-clause | 680 |
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_service_icmp
# Object as the mocked Check Point API is expected to return it.
OBJECT = {
    "name": "Icmp1",
    "icmp_type": 5,
    "icmp_code": 7
}
# Module arguments that create the object.
CREATE_PAYLOAD = {
    "name": "Icmp1",
    "icmp_type": 5,
    "icmp_code": 7
}
# Module arguments that modify the existing object in place.
UPDATE_PAYLOAD = {
    "name": "Icmp1",
    "icmp_type": 45,
    "icmp_code": 13
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
# state=absent requests deletion of the named object.
DELETE_PAYLOAD = {
    "name": "Icmp1",
    "state": "absent"
}
# Dotted path of the api_call helper patched in every test below.
function_path = 'ansible.modules.network.check_point.cp_mgmt_service_icmp.api_call'
api_call_object = 'service-icmp'
class TestCheckpointServiceIcmp(object):
    """Unit tests for the cp_mgmt_service_icmp module: each test patches
    the module's api_call helper and asserts the module reports the
    right 'changed' flag and object payload."""
    module = cp_mgmt_service_icmp
    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers so module completion
        # raises catchable exceptions instead of calling sys.exit().
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
    @pytest.fixture
    def connection_mock(self, mocker):
        # Stub the persistent HTTPAPI connection so no device is needed.
        connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value
    def test_create(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)
        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()
    def test_create_idempotent(self, mocker, connection_mock):
        # Re-creating an identical object must report changed=False.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)
        assert not result['changed']
    def test_update(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)
        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
    def test_update_idempotent(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)
        assert not result['changed']
    def test_delete(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True}
        result = self._run_module(DELETE_PAYLOAD)
        assert result['changed']
    def test_delete_idempotent(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False}
        result = self._run_module(DELETE_PAYLOAD)
        assert not result['changed']
    def _run_module(self, module_args):
        # Run the module with the given args and return the dict passed
        # to exit_json (raised as AnsibleExitJson by the patched handler).
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| thaim/ansible | test/units/modules/network/check_point/test_cp_mgmt_service_icmp.py | Python | mit | 3,878 |
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_host
# Object as the mocked Check Point API is expected to return it.
OBJECT = {
    "name": "New Host 1",
    "ip_address": "192.0.2.1"
}
# Module arguments that create the object.
CREATE_PAYLOAD = {
    "name": "New Host 1",
    "ip_address": "192.0.2.1"
}
# NOTE(review): the update payload uses "ipv4_address" while creation uses
# "ip_address" -- confirm both keys are accepted by the cp_mgmt_host module.
UPDATE_PAYLOAD = {
    "name": "New Host 1",
    "color": "blue",
    "ipv4_address": "192.0.2.2"
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
# state=absent requests deletion of the named object.
DELETE_PAYLOAD = {
    "name": "New Host 1",
    "state": "absent"
}
# Dotted path of the api_call helper patched in the tests below.
function_path = 'ansible.modules.network.check_point.cp_mgmt_host.api_call'
api_call_object = 'host'
class TestCheckpointHost(object):
    """CRUD tests for the cp_mgmt_host module against a mocked API."""

    module = cp_mgmt_host

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace exit_json/fail_json so module termination raises instead
        # of calling sys.exit().
        return mocker.patch.multiple(basic.AnsibleModule,
                                     exit_json=exit_json,
                                     fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        connection_class_mock = mocker.patch(
            'ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value

    def _mock_api_call(self, mocker, return_value):
        """Patch the module's api_call and fix its return value."""
        patched = mocker.patch(function_path)
        patched.return_value = return_value
        return patched

    def test_create(self, mocker, connection_mock):
        self._mock_api_call(mocker, {'changed': True, api_call_object: OBJECT})
        result = self._run_module(CREATE_PAYLOAD)
        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()

    def test_create_idempotent(self, mocker, connection_mock):
        self._mock_api_call(mocker, {'changed': False, api_call_object: OBJECT})
        result = self._run_module(CREATE_PAYLOAD)
        assert not result['changed']

    def test_update(self, mocker, connection_mock):
        self._mock_api_call(
            mocker, {'changed': True, api_call_object: OBJECT_AFTER_UPDATE})
        result = self._run_module(UPDATE_PAYLOAD)
        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()

    def test_update_idempotent(self, mocker, connection_mock):
        self._mock_api_call(
            mocker, {'changed': False, api_call_object: OBJECT_AFTER_UPDATE})
        result = self._run_module(UPDATE_PAYLOAD)
        assert not result['changed']

    def test_delete(self, mocker, connection_mock):
        self._mock_api_call(mocker, {'changed': True})
        result = self._run_module(DELETE_PAYLOAD)
        assert result['changed']

    def test_delete_idempotent(self, mocker, connection_mock):
        self._mock_api_call(mocker, {'changed': False})
        result = self._run_module(DELETE_PAYLOAD)
        assert not result['changed']

    def _run_module(self, module_args):
        """Run the module and return the JSON passed to exit_json."""
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| kvar/ansible | test/units/modules/network/check_point/test_cp_mgmt_host.py | Python | gpl-3.0 | 3,853 |
from website.addons.base.serializer import CitationsAddonSerializer
class MendeleySerializer(CitationsAddonSerializer):
    """Serializer for the Mendeley citations addon.

    All behaviour is inherited from CitationsAddonSerializer; only the
    addon's short name is specialised here.
    """
    # Identifier used to look this addon up in shared citation machinery.
    addon_short_name = 'mendeley'
| zachjanicki/osf.io | website/addons/mendeley/serializer.py | Python | apache-2.0 | 155 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from neutronclient.common import exceptions as neutron_exc
from openstack_dashboard.api import keystone
from openstack_dashboard.api import network
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.access_and_security.\
api_access.tables import EndpointsTable
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips.tables import FloatingIPsTable
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.tables import KeypairsTable
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups.tables import SecurityGroupsTable
class SecurityGroupsTab(tabs.TableTab):
    """Tab listing the tenant's security groups."""
    table_classes = (SecurityGroupsTable,)
    name = _("Security Groups")
    slug = "security_groups_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_security_groups_data(self):
        """Return all security groups, sorted by name.

        API failures are surfaced to the user and degrade to an empty
        listing rather than breaking the page.
        """
        groups = []
        try:
            groups = network.security_group_list(self.request)
        except neutron_exc.ConnectionFailed:
            exceptions.handle(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve security groups.'))
        return sorted(groups, key=lambda sg: sg.name)
class KeypairsTab(tabs.TableTab):
    """Tab listing the tenant's SSH key pairs."""
    table_classes = (KeypairsTable,)
    name = _("Key Pairs")
    slug = "keypairs_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_keypairs_data(self):
        """Fetch key pairs from nova; degrade to an empty list on error."""
        pairs = []
        try:
            pairs = nova.keypair_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve key pair list.'))
        return pairs
class FloatingIPsTab(tabs.TableTab):
    """Tab listing floating IPs annotated with pool and instance names."""
    table_classes = (FloatingIPsTable,)
    name = _("Floating IPs")
    slug = "floating_ips_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_floating_ips_data(self):
        """Return the tenant's floating IPs with friendly names attached.

        Each IP gets ``instance_name`` (name of the attached instance, or
        None) and ``pool_name`` (pool name, falling back to the raw pool
        id).  Every API failure is reported to the user and degrades to an
        empty collection rather than breaking the whole page.
        """
        try:
            floating_ips = network.tenant_floating_ip_list(self.request)
        except neutron_exc.ConnectionFailed:
            floating_ips = []
            exceptions.handle(self.request)
        except Exception:
            floating_ips = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP addresses.'))

        try:
            floating_ip_pools = network.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            floating_ip_pools = []
            exceptions.handle(self.request)
        except Exception:
            floating_ip_pools = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP pools.'))
        # Dict comprehension instead of dict([(k, v) for ...]) -- same
        # result without building a throwaway list (C404).
        pool_dict = {pool.id: pool.name for pool in floating_ip_pools}

        instances = []
        try:
            instances, has_more = nova.server_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve instance list.'))

        instances_dict = {inst.id: inst.name for inst in instances}

        for ip in floating_ips:
            ip.instance_name = instances_dict.get(ip.instance_id)
            # Fall back to the raw pool id when the pool name is unknown.
            ip.pool_name = pool_dict.get(ip.pool, ip.pool)
        return floating_ips

    def allowed(self, request):
        # Hide the tab entirely when the backend has no floating IP support.
        return network.floating_ip_supported(request)
class APIAccessTab(tabs.TableTab):
    """Tab listing the service-catalog API endpoints."""
    table_classes = (EndpointsTable,)
    name = _("API Access")
    slug = "api_access_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_endpoints_data(self):
        """Wrap each catalog entry as a keystone.Service for the table."""
        catalog = self.request.user.service_catalog
        region = self.request.user.services_region
        services = []
        for index, service in enumerate(catalog):
            # The table needs a unique row id; the catalog has none.
            service['id'] = index
            services.append(keystone.Service(service, region))
        return services
class AccessAndSecurityTabs(tabs.TabGroup):
    """Tab group for the project's Access & Security panel."""
    slug = "access_security_tabs"
    # Order here determines the on-screen tab order.
    tabs = (SecurityGroupsTab, KeypairsTab, FloatingIPsTab, APIAccessTab)
    # NOTE(review): sticky presumably keeps the active tab selected across
    # requests -- confirm against horizon.tabs.TabGroup.
    sticky = True
| takeshineshiro/horizon | openstack_dashboard/dashboards/project/access_and_security/tabs.py | Python | apache-2.0 | 5,203 |
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse a response to the 'ismaster' command."""
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
    """Determine the server type from an ismaster response."""
    if not doc.get('ok'):
        return SERVER_TYPE.Unknown

    if doc.get('isreplicaset'):
        return SERVER_TYPE.RSGhost

    if doc.get('setName'):
        # Member of a replica set; classify by its advertised role.
        if doc.get('hidden'):
            return SERVER_TYPE.RSOther
        if doc.get('ismaster'):
            return SERVER_TYPE.RSPrimary
        if doc.get('secondary'):
            return SERVER_TYPE.RSSecondary
        if doc.get('arbiterOnly'):
            return SERVER_TYPE.RSArbiter
        return SERVER_TYPE.RSOther

    if doc.get('msg') == 'isdbgrid':
        return SERVER_TYPE.Mongos

    return SERVER_TYPE.Standalone
class IsMaster(object):
    """Immutable view of a parsed ismaster command response."""

    __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')

    def __init__(self, doc):
        """Parse an ismaster response from the server."""
        self._server_type = _get_server_type(doc)
        self._doc = doc
        self._is_writable = self._server_type in (
            SERVER_TYPE.RSPrimary,
            SERVER_TYPE.Standalone,
            SERVER_TYPE.Mongos)

        # CONSISTENCY: use the private attribute directly, like the rest of
        # __init__ (was `self.server_type`, a property call mid-init).
        self._is_readable = (
            self._server_type == SERVER_TYPE.RSSecondary
            or self._is_writable)

    @property
    def server_type(self):
        """One of the SERVER_TYPE constants derived from the response."""
        return self._server_type

    @property
    def all_hosts(self):
        """List of hosts, passives, and arbiters known to this server."""
        return set(imap(common.clean_node, itertools.chain(
            self._doc.get('hosts', []),
            self._doc.get('passives', []),
            self._doc.get('arbiters', []))))

    @property
    def tags(self):
        """Replica set member tags or empty dict."""
        return self._doc.get('tags', {})

    @property
    def primary(self):
        """This server's opinion about who the primary is, or None."""
        if self._doc.get('primary'):
            return common.partition_node(self._doc['primary'])
        else:
            return None

    @property
    def replica_set_name(self):
        """Replica set name or None."""
        return self._doc.get('setName')

    @property
    def max_bson_size(self):
        return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)

    @property
    def max_message_size(self):
        # Default per the server: twice the max BSON document size.
        return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)

    @property
    def max_write_batch_size(self):
        return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)

    @property
    def min_wire_version(self):
        return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)

    @property
    def max_wire_version(self):
        return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)

    @property
    def election_id(self):
        """The server's electionId field, or None if absent."""
        return self._doc.get('electionId')

    @property
    def is_writable(self):
        return self._is_writable

    @property
    def is_readable(self):
        return self._is_readable
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: snow_record
short_description: Manage records in ServiceNow
version_added: "2.5"
description:
- Creates, deletes and updates a single record in ServiceNow.
options:
table:
description:
- Table to query for records.
required: false
default: incident
type: str
state:
description:
- If C(present) is supplied with a C(number) argument, the module will attempt to update the record with the supplied data.
- If no such record exists, a new one will be created.
- C(absent) will delete a record.
choices: [ present, absent ]
required: true
type: str
data:
description:
- key, value pairs of data to load into the record. See Examples.
- Required for C(state:present).
type: dict
number:
description:
- Record number to update.
- Required for C(state:absent).
required: false
type: str
lookup_field:
description:
- Changes the field that C(number) uses to find records.
required: false
default: number
type: str
attachment:
description:
- Attach a file to the record.
required: false
type: str
requirements:
- python pysnow (pysnow)
author:
- Tim Rightnour (@garbled1)
extends_documentation_fragment: service_now.documentation
'''
EXAMPLES = '''
- name: Grab a user record
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: 62826bf03710200044e0bfc8bcbe5df1
table: sys_user
lookup_field: sys_id
- name: Grab a user record using OAuth
snow_record:
username: ansible_test
password: my_password
client_id: "1234567890abcdef1234567890abcdef"
client_secret: "Password1!"
instance: dev99999
state: present
number: 62826bf03710200044e0bfc8bcbe5df1
table: sys_user
lookup_field: sys_id
- name: Create an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
data:
short_description: "This is a test incident opened by Ansible"
severity: 3
priority: 2
register: new_incident
- name: Delete the record we just made
snow_record:
username: admin
password: xxxxxxx
instance: dev99999
state: absent
number: "{{new_incident['record']['number']}}"
- name: Delete a non-existant record
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: absent
number: 9872354
failed_when: false
- name: Update an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: INC0000055
data:
work_notes : "Been working all day on this thing."
- name: Attach a file to an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: INC0000055
attachment: README.md
tags: attach
'''
RETURN = '''
record:
description: Record data from Service Now
type: dict
returned: when supported
attached_file:
description: Details of the file that was attached via C(attachment)
type: dict
returned: when supported
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.service_now import ServiceNowClient
try:
# This is being handled by ServiceNowClient
import pysnow
except ImportError:
pass
def run_module():
    """Create, update, or delete a single ServiceNow record.

    Reads the module parameters, connects to ServiceNow, and dispatches on
    (state, number):
      - state=present, number=None  -> insert a new record from `data`
      - state=present, number given -> update (or fetch) that record,
        optionally attaching a file
      - state=absent                -> delete the record
    Always terminates through module.exit_json()/fail_json().
    """
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = ServiceNowClient.snow_argument_spec()
    module_args.update(
        table=dict(type='str', required=False, default='incident'),
        state=dict(choices=['present', 'absent'],
                   type='str', required=True),
        number=dict(default=None, required=False, type='str'),
        data=dict(default=None, required=False, type='dict'),
        lookup_field=dict(default='number', required=False, type='str'),
        attachment=dict(default=None, required=False, type='str')
    )
    module_required_together = [
        ['client_id', 'client_secret']
    ]
    module_required_if = [
        ['state', 'absent', ['number']],
    ]

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_together=module_required_together,
        required_if=module_required_if
    )

    # Connect to ServiceNow
    service_now_client = ServiceNowClient(module)
    service_now_client.login()
    conn = service_now_client.conn

    params = module.params
    instance = params['instance']
    table = params['table']
    state = params['state']
    number = params['number']
    data = params['data']
    lookup_field = params['lookup_field']

    result = dict(
        changed=False,
        instance=instance,
        table=table,
        number=number,
        lookup_field=lookup_field
    )

    # check for attachments
    if params['attachment'] is not None:
        attach = params['attachment']
        b_attach = to_bytes(attach, errors='surrogate_or_strict')
        if not os.path.exists(b_attach):
            module.fail_json(msg="Attachment {0} not found".format(attach))
        result['attachment'] = attach
    else:
        attach = None

    # Deal with check mode
    if module.check_mode:

        # if we are in check mode and have no number, we would have created
        # a record.  We can only partially simulate this
        if number is None:
            result['record'] = dict(data)
            result['changed'] = True

        # do we want to check if the record is non-existent?
        elif state == 'absent':
            try:
                record = conn.query(table=table, query={lookup_field: number})
                res = record.get_one()
                result['record'] = dict(Success=True)
                result['changed'] = True
            except pysnow.exceptions.NoResults:
                # Deleting a missing record is a no-op, not an error.
                result['record'] = None
            except Exception as detail:
                module.fail_json(msg="Unknown failure in query record: {0}".format(to_native(detail)), **result)

        # Let's simulate modification
        else:
            try:
                record = conn.query(table=table, query={lookup_field: number})
                res = record.get_one()
                for key, value in data.items():
                    res[key] = value
                    result['changed'] = True
                result['record'] = res
            except pysnow.exceptions.NoResults:
                snow_error = "Record does not exist"
                module.fail_json(msg=snow_error, **result)
            except Exception as detail:
                module.fail_json(msg="Unknown failure in query record: {0}".format(to_native(detail)), **result)
        module.exit_json(**result)

    # now for the real thing: (non-check mode)

    # are we creating a new record?
    if state == 'present' and number is None:
        try:
            record = conn.insert(table=table, payload=dict(data))
        except pysnow.exceptions.UnexpectedResponseFormat as e:
            snow_error = "Failed to create record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        except pysnow.legacy_exceptions.UnexpectedResponse as e:
            module.fail_json(msg="Failed to create record due to %s" % to_native(e), **result)
        result['record'] = record
        result['changed'] = True

    # we are deleting a record
    elif state == 'absent':
        try:
            record = conn.query(table=table, query={lookup_field: number})
            res = record.delete()
        except pysnow.exceptions.NoResults:
            # Already gone: report success without a change.
            res = dict(Success=True)
        except pysnow.exceptions.MultipleResults:
            snow_error = "Multiple record match"
            module.fail_json(msg=snow_error, **result)
        except pysnow.exceptions.UnexpectedResponseFormat as e:
            snow_error = "Failed to delete record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        except pysnow.legacy_exceptions.UnexpectedResponse as e:
            module.fail_json(msg="Failed to delete record due to %s" % to_native(e), **result)
        except Exception as detail:
            snow_error = "Failed to delete record: {0}".format(to_native(detail))
            module.fail_json(msg=snow_error, **result)
        result['record'] = res
        result['changed'] = True

    # We want to update a record
    else:
        try:
            record = conn.query(table=table, query={lookup_field: number})
            if data is not None:
                res = record.update(dict(data))
                result['record'] = res
                result['changed'] = True
            else:
                # No data supplied: just fetch and report the record.
                res = record.get_one()
                result['record'] = res
            if attach is not None:
                res = record.attach(b_attach)
                result['changed'] = True
                result['attached_file'] = res

        except pysnow.exceptions.MultipleResults:
            snow_error = "Multiple record match"
            module.fail_json(msg=snow_error, **result)
        except pysnow.exceptions.NoResults:
            snow_error = "Record does not exist"
            module.fail_json(msg=snow_error, **result)
        except pysnow.exceptions.UnexpectedResponseFormat as e:
            snow_error = "Failed to update record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        except pysnow.legacy_exceptions.UnexpectedResponse as e:
            module.fail_json(msg="Failed to update record due to %s" % to_native(e), **result)
        except Exception as detail:
            snow_error = "Failed to update record: {0}".format(to_native(detail))
            module.fail_json(msg=snow_error, **result)

    module.exit_json(**result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
| thaim/ansible | lib/ansible/modules/notification/snow_record.py | Python | mit | 10,616 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class WebGLConformanceExpectations(test_expectations.TestExpectations):
  """Expected failures/skips for the WebGL conformance test suite.

  Each entry names a conformance test and, optionally, the OS/GPU
  conditions under which it fails (see the comment block above for the
  valid condition names) and the tracking bug number.
  """

  def SetExpectations(self):
    """Register every known failing or skipped conformance test."""
    # Sample Usage:
    # self.Fail('gl-enable-vertex-attrib.html',
    #     ['mac', 'amd', ('nvidia', 0x1234)], bug=123)

    # Fails everywhere.
    self.Skip('conformance/glsl/misc/large-loop-compile.html',
        bug=322764)
    self.Skip('conformance/textures/texture-size-limit.html',
        bug=322789)

    # Windows failures.
    self.Fail('conformance/ogles/GL/atan/atan_001_to_008.html',
        ['win'], bug=322794)
    self.Fail('conformance/ogles/GL/atan/atan_009_to_012.html',
        ['win'], bug=322794)
    self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
        ['win'], bug=322795)

    # Windows/Intel failures
    self.Fail('conformance/textures/texture-size.html',
        ['win', 'intel'], bug=121139)
    self.Fail('conformance/rendering/gl-scissor-test.html',
        ['win', 'intel'], bug=314997)

    # Windows/AMD failures
    self.Fail('conformance/rendering/more-than-65536-indices.html',
        ['win', 'amd'], bug=314997)

    # Windows 7/Intel failures
    self.Fail('conformance/context/context-lost-restored.html',
        ['win7', 'intel'])
    self.Fail('conformance/context/premultiplyalpha-test.html',
        ['win7', 'intel'])
    self.Fail('conformance/extensions/oes-texture-float-with-image-data.html',
        ['win7', 'intel'])
    self.Fail('conformance/extensions/oes-texture-float.html',
        ['win7', 'intel'])
    self.Fail('conformance/limits/gl-min-attribs.html',
        ['win7', 'intel'])
    self.Fail('conformance/limits/gl-max-texture-dimensions.html',
        ['win7', 'intel'])
    self.Fail('conformance/limits/gl-min-textures.html',
        ['win7', 'intel'])
    self.Fail('conformance/limits/gl-min-uniforms.html',
        ['win7', 'intel'])
    self.Fail('conformance/rendering/gl-clear.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/copy-tex-image-and-sub-image-2d.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/gl-teximage.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-image-with-format-and-type.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/tex-sub-image-2d.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texparameter-test.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-active-bind-2.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-active-bind.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-complete.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-formats-test.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-mips.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-npot.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/texture-size-cube-maps.html',
        ['win7', 'intel'])
    self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html',
        ['win7', 'intel'], bug=322770)

    # Mac failures.
    self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
        ['mac'], bug=322760)
    self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html',
        ['mac'], bug=322770)
    self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
        ['mac'], bug=322795)

    # Mac/Intel failures
    self.Fail('conformance/rendering/gl-scissor-test.html',
        ['mac', 'intel'], bug=314997)
    # The following two tests hang the WindowServer.
    self.Skip('conformance/canvas/drawingbuffer-static-canvas-test.html',
        ['mac', 'intel'], bug=303915)
    self.Skip('conformance/canvas/drawingbuffer-test.html',
        ['mac', 'intel'], bug=303915)
    # The following three tests only fail.
    # Radar 13499677
    self.Fail(
        'conformance/glsl/functions/glsl-function-smoothstep-gentype.html',
        ['mac', 'intel'], bug=225642)
    # Radar 13499466
    self.Fail('conformance/limits/gl-max-texture-dimensions.html',
        ['mac', 'intel'], bug=225642)
    # Radar 13499623
    self.Fail('conformance/textures/texture-size.html',
        ['mac', 'intel'], bug=225642)
    self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html',
        ['mac', 'intel'], bug=322795)
    self.Fail('conformance/ogles/GL/operators/operators_009_to_016.html',
        ['mac', 'intel'], bug=322795)

    # Mac/Intel failures on 10.7
    self.Skip('conformance/glsl/functions/glsl-function-asin.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-dot.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-faceforward.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-length.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-normalize.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-reflect.html',
        ['lion', 'intel'])
    self.Skip(
        'conformance/glsl/functions/glsl-function-smoothstep-gentype.html',
        ['lion', 'intel'])
    self.Skip('conformance/limits/gl-max-texture-dimensions.html',
        ['lion', 'intel'])
    self.Skip('conformance/rendering/line-loop-tri-fan.html',
        ['lion', 'intel'])
    self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html',
        ['lion'], bug=322795)
    self.Skip('conformance/ogles/GL/dot/dot_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/faceforward/faceforward_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/length/length_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/normalize/normalize_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/reflect/reflect_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/refract/refract_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/tan/tan_001_to_006.html',
        ['lion', 'intel'], bug=323736)

    # Mac/ATI failures
    self.Skip('conformance/extensions/oes-texture-float-with-image-data.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/rendering/gl-clear.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html',
        ['mac', 'amd'], bug=308328)
    self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html',
        ['mac', 'amd'], bug=308328)
    self.Fail('conformance/canvas/drawingbuffer-test.html',
        ['mac', 'amd'], bug=314997)

    # Linux/NVIDIA failures
    self.Fail('conformance/glsl/misc/empty_main.vert.html',
        ['linux', ('nvidia', 0x1040)], bug=325884)
    self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
        ['linux', ('nvidia', 0x1040)], bug=325884)
    self.Fail('conformance/uniforms/uniform-location.html',
        ['linux', ('nvidia', 0x1040)], bug=325884)

    # Android failures
    self.Fail('conformance/textures/texture-npot-video.html',
        ['android'], bug=306485)
    # The following test is very slow and therefore times out on Android bot.
    self.Skip('conformance/rendering/multisample-corruption.html',
        ['android'])
    self.Fail('conformance/canvas/drawingbuffer-test.html',
        ['android'], bug=314997)
    self.Fail('conformance/glsl/misc/empty_main.vert.html',
        ['android'], bug=315976)
    self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
        ['android'], bug=315976)

    # Skip slow tests.
    self.Skip('conformance/context/context-creation-and-destruction.html',
        bug=322689)
    self.Skip('conformance/rendering/multisample-corruption.html',
        bug=322689)
| qtekfun/htcDesire820Kernel | external/chromium_org/content/test/gpu/gpu_tests/webgl_conformance_expectations.py | Python | gpl-2.0 | 9,671 |
#!/usr/bin/env python
# (c) 2013, Greg Buehler
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
If you want to run with --limit against a host group with space in the
name, use asterisk. For example --limit="Linux*servers".
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6 and 3.2.3.
"""
from __future__ import print_function
import os
import sys
import argparse
try:
import ConfigParser as configparser
except ImportError:
import configparser
# The zabbix-api package is an external dependency; give an actionable hint
# when it is missing instead of a bare traceback.
try:
    from zabbix_api import ZabbixAPI
except ImportError:
    # BUGFIX: was a bare `except:`, which also masked unrelated errors
    # (SyntaxError in the package, KeyboardInterrupt, ...).
    print("Error: Zabbix API library must be installed: pip install zabbix-api.",
          file=sys.stderr)
    sys.exit(1)
import json
class ZabbixInventory(object):
    """Ansible dynamic-inventory provider backed by the Zabbix API.

    Construction does all the work: read zabbix.ini, parse the CLI,
    query the server, and print the inventory JSON.
    """

    def read_settings(self):
        """Load server/credential settings from zabbix.ini, if present."""
        # NOTE(review): SafeConfigParser is removed in Python 3.12; it is
        # kept here for Python 2 compatibility -- migrate to ConfigParser
        # once 2.x support is dropped.
        config = configparser.SafeConfigParser()
        conf_path = './zabbix.ini'
        if not os.path.exists(conf_path):
            # Fall back to an ini file next to this script.
            conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
        if os.path.exists(conf_path):
            config.read(conf_path)
        # server
        if config.has_option('zabbix', 'server'):
            self.zabbix_server = config.get('zabbix', 'server')
        # login
        if config.has_option('zabbix', 'username'):
            self.zabbix_username = config.get('zabbix', 'username')
        if config.has_option('zabbix', 'password'):
            self.zabbix_password = config.get('zabbix', 'password')
        # ssl certs
        if config.has_option('zabbix', 'validate_certs'):
            if config.get('zabbix', 'validate_certs') in ['false', 'False', False]:
                self.validate_certs = False

    def read_cli(self):
        """Parse the --host/--list arguments Ansible passes to the script."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--host')
        parser.add_argument('--list', action='store_true')
        self.options = parser.parse_args()

    def hoststub(self):
        """Return an empty group structure."""
        return {
            'hosts': []
        }

    def get_host(self, api, name):
        """Return hostvars for a single host (--host mode)."""
        data = {'ansible_ssh_host': name}
        return data

    def get_list(self, api):
        """Return the full inventory: every host, grouped by its host groups."""
        hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
        data = {}
        data[self.defaultgroup] = self.hoststub()
        for host in hostsData:
            hostname = host['name']
            data[self.defaultgroup]['hosts'].append(hostname)
            for group in host['groups']:
                groupname = group['name']
                if groupname not in data:
                    data[groupname] = self.hoststub()
                data[groupname]['hosts'].append(hostname)
        # Prevents Ansible from calling this script for each server with --host
        data['_meta'] = {'hostvars': self.meta}
        return data

    def __init__(self):
        self.defaultgroup = 'group_all'
        self.zabbix_server = None
        self.zabbix_username = None
        self.zabbix_password = None
        self.validate_certs = True
        self.meta = {}

        self.read_settings()
        self.read_cli()

        if self.zabbix_server and self.zabbix_username:
            try:
                api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs)
                api.login(user=self.zabbix_username, password=self.zabbix_password)
            # BUGFIX: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception is broad enough for
            # connection/auth failures.
            except Exception:
                print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
                sys.exit(1)

            if self.options.host:
                data = self.get_host(api, self.options.host)
                print(json.dumps(data, indent=2))
            elif self.options.list:
                data = self.get_list(api)
                print(json.dumps(data, indent=2))
            else:
                print("usage: --list  ..OR.. --host <hostname>", file=sys.stderr)
                sys.exit(1)
        else:
            print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
            sys.exit(1)
| alexlo03/ansible | contrib/inventory/zabbix.py | Python | gpl-3.0 | 4,795 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class SpaceToDepthTest(test.TestCase):
  """Functional correctness tests for array_ops.space_to_depth."""

  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
    """Asserts space_to_depth(inputs, block_size) == outputs on CPU, and on
    GPU (NHWC and NCHW layouts) when a GPU is available."""
    input_nhwc = math_ops.cast(inputs, dtype)
    with self.test_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)

  def testBasic(self):
    # Smallest meaningful case: 1x2x2x1 -> 1x1x1x4 with block_size 2.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4]]]]
    self._testOne(x_np, block_size, x_out)

  def testBasicFloat16(self):
    # Same as testBasic but exercising the float16 kernel.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4]]]]
    self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered spatially.
  def testLargerInput2x2(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
                                             [13, 14, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered in depth. Here, larger block size.
  def testLargerInput4x4(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 4
    x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleaved(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths. Here an odd depth.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions AND for larger input depths.
  # To make sure elements are properly interleaved in depth and ordered
  # spatially.
  def testDepthInterleavedLarge(self):
    x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
             [[3, 30], [4, 40], [7, 70], [8, 80]],
             [[9, 90], [10, 100], [13, 130], [14, 140]],
             [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120],
               [13, 130, 14, 140, 15, 150, 16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  def testBlockSize2Batch10(self):
    # Verifies the op is applied independently per batch element.
    block_size = 2

    def batch_input_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]

    def batch_output_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]

    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  def testBatchSize0(self):
    # Degenerate batch of 0: only the static output shape can be checked.
    block_size = 2
    batch_size = 0
    input_nhwc = array_ops.ones([batch_size, 4, 6, 3])
    x_out = array_ops.ones([batch_size, 2, 3, 12])
    with self.test_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(x_tf.shape, x_out.shape)
      x_tf.eval()
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(x_tf.shape, x_out.shape)
        x_tf.eval()

  # Tests for different width and height.
  def testNonSquare(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
             [[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  def testInputWrongDimMissingDepth(self):
    # The input is missing the last dimension ("depth")
    x_np = [[[1, 2], [3, 4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testInputWrongDimMissingBatch(self):
    # The input is missing the first dimension ("batch")
    x_np = [[[1], [2]], [[3], [4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSize0(self):
    # The block size is 0.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeOne(self):
    # The block size is 1. The block size needs to be > 1.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeNotDivisibleWidth(self):
    # The block size divides width but not height.
    x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleHeight(self):
    # The block size divides height but not width.
    x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleBoth(self):
    # The block size does not divide neither width or height.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testUnknownShape(self):
    # A fully-unknown input still produces a rank-4 static shape.
    t = array_ops.space_to_depth(
        array_ops.placeholder(dtypes.float32), block_size=4)
    self.assertEqual(4, t.get_shape().ndims)

  def spaceToDepthUsingTranspose(self, tensor, block_size, data_format):
    """Reference implementation: space_to_depth expressed as
    reshape -> transpose -> reshape, used as ground truth below."""
    block_size_sq = block_size * block_size
    if data_format == "NHWC":
      b, ih, iw, ic = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, oh, block_size, ow, block_size, ic])
      tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
      tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
    elif data_format == "NCHW":
      b, ic, ih, iw = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, ic, oh, block_size, ow, block_size])
      tensor = array_ops.transpose(tensor, [0, 3, 5, 1, 2, 4])
      tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
    return tensor

  def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
                         block_size, data_format, use_gpu):
    """Runs the op and the transpose-based reference on the same data and
    asserts identical results; NCHW_VECT_C goes through qint8 quantization."""
    in_height = out_height * block_size
    in_width = out_width * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    if data_format == "NCHW_VECT_C":
      # Initialize the input tensor with qint8 values that circle -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
      expected = self.spaceToDepthUsingTranspose(t, block_size, "NHWC")
      t = test_util.NHWCToNCHW_VECT_C(t)
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      t = array_ops.space_to_depth(t, block_size, data_format="NCHW_VECT_C")
      t = gen_array_ops.dequantize(t, -128, 127)
      actual = test_util.NCHW_VECT_CToNHWC(t)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
      expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)
      actual = array_ops.space_to_depth(t, block_size, data_format=data_format)

    with self.test_session(use_gpu=use_gpu) as sess:
      actual_vals, expected_vals = sess.run([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))

  def testAgainstTranspose(self):
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)

    if not test.is_gpu_available():
      tf_logging.info("skipping gpu tests since gpu not available")
      return

    # NCHW and NCHW_VECT_C layouts are exercised only on GPU.
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
    self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 4, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
class SpaceToDepthGradientTest(test.TestCase):
  """Gradient checks for array_ops.space_to_depth."""

  # Check the gradients.
  def _checkGrad(self, x, block_size, data_format):
    """Numerically verifies the Jacobian of space_to_depth at x."""
    # NCHW is implemented for only GPU.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.space_to_depth(tf_x, block_size, data_format=data_format)
      epsilon = 1e-2
      ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
    # Compare the theoretical and numerical Jacobians.
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests a gradient for space_to_depth of x which is a four dimensional
  # tensor of shape [b, h * block_size, w * block_size, d].
  def _compare(self, b, h, w, d, block_size, data_format):
    block_size_sq = block_size * block_size
    data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
        np.float32)
    if data_format == "NHWC":
      x = data.reshape([b, h * block_size, w * block_size, d])
    else:
      x = data.reshape([b, d, h * block_size, w * block_size])

    self._checkGrad(x, block_size, data_format)

  # Don't use very large numbers as dimensions here as the result is tensor
  # with cartesian product of the dimensions.
  def testSmall(self):
    block_size = 2
    self._compare(1, 2, 3, 5, block_size, "NHWC")
    self._compare(1, 2, 3, 5, block_size, "NCHW")

  def testSmall2(self):
    block_size = 2
    self._compare(2, 4, 3, 2, block_size, "NHWC")
    self._compare(2, 4, 3, 2, block_size, "NCHW")


if __name__ == "__main__":
  test.main()
| nburn42/tensorflow | tensorflow/python/kernel_tests/spacetodepth_op_test.py | Python | apache-2.0 | 13,957 |
#!/usr/bin/env python
#
# This script is designed to be invoked by soy2html.sh.
# Usage:
#
# buck/docs$ python soy2html.py <output_dir>
#
# This will write all of the static content to the specified output directory.
# You may want to verify that things worked correctly by running:
#
# python -m SimpleHTTPServer <output_dir>
#
# and then navigating to http://localhost:8000/.
#
# When this script is run, soyweb should already be running locally on port
# 9814 via ./docs/soyweb-prod.sh.
import os
import subprocess
import sys
import time
URL_ROOT = 'http://localhost:9814/'
def main(output_dir):
    """Crawl the locally-running soyweb instance and mirror the rendered
    pages plus all static resources into *output_dir*."""
    # Iterate over the files in the docs directory and copy them, as
    # appropriate.
    for root, dirs, files in os.walk('.'):
        for file_name in files:
            if file_name.endswith('.soy') and not file_name.startswith('__'):
                # Strip the './' prefix, if appropriate.
                if root.startswith('./'):
                    root = root[2:]

                # Construct the URL where the .soy file is being served.
                soy_file = file_name
                html_file = root + '/' + soy_file[:-len('.soy')] + '.html'
                url = URL_ROOT + html_file

                copy_dest = ensure_dir(html_file, output_dir)
                # curl --fail makes any HTTP error abort the whole build.
                subprocess.check_call([
                    "curl", "--fail", "--output", copy_dest, url
                ])
            elif (file_name == ".nojekyll" or
                  file_name == "CNAME" or
                  file_name.endswith('.css') or
                  file_name.endswith('.jpg') or
                  file_name.endswith('.js') or
                  file_name.endswith('.png') or
                  file_name.endswith('.gif') or
                  file_name.endswith('.html') or
                  file_name.endswith('.md') or
                  file_name.endswith('.svg') or
                  file_name.endswith('.ttf') or
                  file_name.endswith('.txt')):
                # Copy the static resource to output_dir.
                relative_path = os.path.join(root, file_name)
                with open(relative_path) as resource_file:
                    resource = resource_file.read()
                copy_to_output_dir(relative_path, output_dir, resource)
def ensure_dir(path, output_dir):
    """Create the parent directories for *path* under *output_dir* (if any)
    and return the full destination path."""
    directory, _, _ = path.rpartition('/')
    if directory:
        target = os.path.join(output_dir, directory)
        if not os.path.exists(target):
            os.makedirs(target)
    return os.path.join(output_dir, path)
def copy_to_output_dir(path, output_dir, content):
    """Write *content* to the mirrored location of *path* under *output_dir*,
    creating intermediate directories as needed."""
    destination = ensure_dir(path, output_dir)
    with open(destination, 'w') as out:
        out.write(content)
def pollForServerReady():
    """Wait (up to SERVER_START_POLL seconds) for soyweb to answer on URL_ROOT.

    Returns as soon as a HEAD request succeeds; otherwise prints a failure
    message and returns anyway, letting the subsequent crawl fail loudly.
    """
    SERVER_START_POLL = 5
    # Parenthesized print is valid under both Python 2 and Python 3; the
    # original `print '...'` statement form is a SyntaxError on Python 3.
    print('Waiting for server to start.')
    for _ in range(0, SERVER_START_POLL):
        # curl --fail exits non-zero until the server answers the HEAD request.
        result = subprocess.call(['curl', '--fail', '-I', URL_ROOT])
        if result == 0:
            return
        time.sleep(1)
    print('Server failed to start after %s seconds.' % SERVER_START_POLL)
if __name__ == '__main__':
    # Required positional argument: the destination directory (see module header).
    output_dir = sys.argv[1]
    pollForServerReady()
    main(output_dir)
| raviagarwal7/buck | docs/soy2html.py | Python | apache-2.0 | 3,176 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
# 2013 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""An API to provide spell checking for use in checks or elsewhere."""
import logging
logger = logging.getLogger(__name__)
# True once the enchant backend has been imported successfully.
available = False
try:
    # Enchant
    from enchant import checker, Error as EnchantError
    available = True
    # Cache of SpellChecker objects keyed by language code; a value of None
    # marks a language whose dictionary could not be loaded.
    checkers = {}

    def _get_checker(lang):
        """Lazily create (and cache) a SpellChecker for `lang`; returns None
        when no dictionary is available for that language."""
        if not lang in checkers:
            try:
                checkers[lang] = checker.SpellChecker(lang)
                # some versions only report an error when checking something
                checkers[lang].check(u'bla')
            except EnchantError as e:
                # sometimes this is raised instead of DictNotFoundError
                logger.error(str(e))
                checkers[lang] = None
        return checkers[lang]

    def check(text, lang):
        """Yield (word, position, suggestions) for each misspelling in `text`."""
        spellchecker = _get_checker(lang)
        if not spellchecker:
            return
        spellchecker.set_text(unicode(text))
        for err in spellchecker:
            yield err.word, err.wordpos, err.suggest()

    def simple_check(text, lang):
        """Yield only the misspelled words found in `text`."""
        spellchecker = _get_checker(lang)
        if not spellchecker:
            return
        spellchecker.set_text(unicode(text))
        for err in spellchecker:
            yield err.word
except ImportError:
    # Enchant is not installed: provide no-op fallbacks with the same API.
    def check(text, lang):
        return []

    def simple_check(text, lang):
        return []
| biswajitsahu/kuma | vendor/packages/translate/filters/spelling.py | Python | mpl-2.0 | 2,109 |
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.8.1'
__build__ = 0x020801
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python < 2.7, which lacks logging.NullHandler.
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Deliberately discard every record.
            pass

# Attach the no-op handler so importing `requests` never triggers the
# "No handler found" warning in applications that don't configure logging.
logging.getLogger(__name__).addHandler(NullHandler())
| LockScreen/Backend | venv/lib/python2.7/site-packages/requests/__init__.py | Python | mit | 1,861 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
    # Same as the 'excel' CSV dialect, but with Unix ('\n') line endings.
    lineterminator = '\n'

# Register the dialect so writers can be created with csv.writer(buf, 'UNIX').
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    """Fetch one translation value from ir_translation, matching on whichever
    of `name` / `source` is provided; returns the value or False.

    NOTE(review): when both `name` and `source` are falsy no query is issued
    and cr.fetchone() reads from whatever the cursor last executed — callers
    are expected to always pass at least one of them.
    """
    if source and name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
    elif name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
    elif source:
        cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
    res_trans = cr.fetchone()
    # First column of the matched row, or False when nothing matched.
    res = res_trans and res_trans[0] or False
    return res
class GettextAlias(object):
    """Callable gettext-style alias (bound to `_` below).

    Inspects the caller's stack frame to discover a database cursor and a
    language, then resolves the source term through ir.translation. Falls
    back to returning the source string unchanged on any failure.
    """

    def _get_db(self):
        """Return a db connection for the current thread's database, if any."""
        # find current DB based on thread/worker db name (see netsvc)
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            return sql_db.db_connect(db_name)

    def _get_cr(self, frame, allow_create=True):
        """Return (cursor, is_new_cursor) harvested from the caller's frame."""
        # try, in order: cr, cursor, self.env.cr, self.cr,
        # request.env.cr
        if 'cr' in frame.f_locals:
            return frame.f_locals['cr'], False
        if 'cursor' in frame.f_locals:
            return frame.f_locals['cursor'], False
        s = frame.f_locals.get('self')
        if hasattr(s, 'env'):
            return s.env.cr, False
        if hasattr(s, 'cr'):
            return s.cr, False
        try:
            from openerp.http import request
            return request.env.cr, False
        except RuntimeError:
            pass
        if allow_create:
            # create a new cursor
            db = self._get_db()
            if db is not None:
                return db.cursor(), True
        return None, False

    def _get_uid(self, frame):
        """Return the user id found in the caller's frame."""
        # try, in order: uid, user, self.env.uid
        if 'uid' in frame.f_locals:
            return frame.f_locals['uid']
        if 'user' in frame.f_locals:
            return int(frame.f_locals['user'])      # user may be a record
        s = frame.f_locals.get('self')
        return s.env.uid

    def _get_lang(self, frame):
        """Return the language code found in the caller's frame, or None."""
        # try, in order: context.get('lang'), kwargs['context'].get('lang'),
        # self.env.lang, self.localcontext.get('lang'), request.env.lang
        lang = None
        if frame.f_locals.get('context'):
            lang = frame.f_locals['context'].get('lang')
        if not lang:
            kwargs = frame.f_locals.get('kwargs', {})
            if kwargs.get('context'):
                lang = kwargs['context'].get('lang')
        if not lang:
            s = frame.f_locals.get('self')
            if hasattr(s, 'env'):
                lang = s.env.lang
            if not lang:
                if hasattr(s, 'localcontext'):
                    lang = s.localcontext.get('lang')
            if not lang:
                try:
                    from openerp.http import request
                    lang = request.env.lang
                except RuntimeError:
                    pass
            if not lang:
                # Last resort: attempt to guess the language of the user
                # Pitfall: some operations are performed in sudo mode, and we
                #          don't know the originial uid, so the language may
                #          be wrong when the admin language differs.
                pool = getattr(s, 'pool', None)
                (cr, dummy) = self._get_cr(frame, allow_create=False)
                uid = self._get_uid(frame)
                if pool and cr and uid:
                    lang = pool['res.users'].context_get(cr, uid)['lang']
        return lang

    def __call__(self, source):
        res = source
        cr = None
        is_new_cr = False
        try:
            # Inspect the immediate caller to find cursor and language.
            frame = inspect.currentframe()
            if frame is None:
                return source
            frame = frame.f_back
            if not frame:
                return source
            lang = self._get_lang(frame)
            if lang:
                cr, is_new_cr = self._get_cr(frame)
                if cr:
                    # Try to use ir.translation to benefit from global cache if possible
                    registry = openerp.registry(cr.dbname)
                    res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
                else:
                    _logger.debug('no context cursor detected, skipping translation for "%r"', source)
            else:
                _logger.debug('no translation language detected, skipping translation for "%r" ', source)
        except Exception:
            # Any failure degrades to returning the untranslated source.
            _logger.debug('translation went wrong for "%r", skipped', source)
                # if so, double-check the root/base translations filenames
        finally:
            if cr and is_new_cr:
                cr.close()
        return res

# Module-level gettext-style alias used throughout the codebase as _('term').
_ = GettextAlias()
def quote(s):
    """Returns quoted PO term string, with special PO characters escaped"""
    assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
    # Escape in this exact order: backslashes first, then embedded double
    # quotes, then literal newlines (PO continues a term on a new quoted line).
    escaped = s.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    escaped = escaped.replace('\n', '\\n"\n"')
    return '"%s"' % escaped
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }
def _sub_replacement(match_obj):
    # Map a matched escape like '\n' to its real character via
    # re_escaped_replacements; unknown escapes collapse to the bare
    # character (e.g. '\d' -> 'd').
    return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
    """Returns unquoted PO term string, with special PO characters unescaped"""
    inner = str[1:-1]  # drop the surrounding double quotes
    return re_escaped_char.sub(_sub_replacement, inner)
# class to handle po files
# class to handle po files
class TinyPoFile(object):
    """Iterator/writer over a gettext .po buffer.

    Iteration yields (trans_type, name, res_id, source, translation,
    comments) tuples; one PO entry with several '#:' reference targets is
    expanded into one tuple per target (queued in self.extra_lines).
    """

    def __init__(self, buffer):
        # File-like object the PO data is read from / written to.
        self.buffer = buffer

    def warn(self, msg, *args):
        _logger.warning(msg, *args)

    def __iter__(self):
        # Reset parse state so the same instance can be iterated again.
        self.buffer.seek(0)
        self.lines = self._get_lines()
        self.lines_count = len(self.lines)

        self.first = True
        self.extra_lines= []
        return self

    def _get_lines(self):
        lines = self.buffer.readlines()
        # remove the BOM (Byte Order Mark):
        if len(lines):
            lines[0] = unicode(lines[0], 'utf8').lstrip(unicode( codecs.BOM_UTF8, "utf8"))

        lines.append('') # ensure that the file ends with at least an empty line
        return lines

    def cur_line(self):
        # 1-based-ish position: how many lines have been consumed so far.
        return self.lines_count - len(self.lines)

    def next(self):
        """Return the next translation tuple, consuming buffer lines."""
        trans_type = name = res_id = source = trad = None
        if self.extra_lines:
            # Entry queued from a previous multi-target '#:' comment.
            trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
            if not res_id:
                res_id = '0'
        else:
            comments = []
            targets = []
            line = None
            fuzzy = False
            while not line:
                if 0 == len(self.lines):
                    raise StopIteration()
                line = self.lines.pop(0).strip()
            while line.startswith('#'):
                if line.startswith('#~ '):
                    break
                if line.startswith('#.'):
                    line = line[2:].strip()
                    if not line.startswith('module:'):
                        comments.append(line)
                elif line.startswith('#:'):
                    # Process the `reference` comments. Each line can specify
                    # multiple targets (e.g. model, view, code, selection,
                    # ...). For each target, we will return an additional
                    # entry.
                    for lpart in line[2:].strip().split(' '):
                        trans_info = lpart.strip().split(':',2)
                        if trans_info and len(trans_info) == 2:
                            # looks like the translation trans_type is missing, which is not
                            # unexpected because it is not a GetText standard. Default: 'code'
                            trans_info[:0] = ['code']
                        if trans_info and len(trans_info) == 3:
                            # this is a ref line holding the destination info (model, field, record)
                            targets.append(trans_info)
                elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
                    fuzzy = True
                line = self.lines.pop(0).strip()
            if not self.lines:
                raise StopIteration()
            while not line:
                # allow empty lines between comments and msgid
                line = self.lines.pop(0).strip()
            if line.startswith('#~ '):
                while line.startswith('#~ ') or not line.strip():
                    if 0 == len(self.lines):
                        raise StopIteration()
                    line = self.lines.pop(0)
                # This has been a deprecated entry, don't return anything
                return self.next()

            if not line.startswith('msgid'):
                raise Exception("malformed file: bad line: %s" % line)
            source = unquote(line[6:])
            line = self.lines.pop(0).strip()
            if not source and self.first:
                self.first = False
                # if the source is "" and it's the first msgid, it's the special
                # msgstr with the informations about the traduction and the
                # traductor; we skip it
                self.extra_lines = []
                while line:
                    line = self.lines.pop(0).strip()
                return self.next()

            while not line.startswith('msgstr'):
                if not line:
                    raise Exception('malformed file at %d'% self.cur_line())
                source += unquote(line)
                line = self.lines.pop(0).strip()

            trad = unquote(line[7:])
            line = self.lines.pop(0).strip()
            # Accumulate continuation lines of the msgstr.
            while line:
                trad += unquote(line)
                line = self.lines.pop(0).strip()

            if targets and not fuzzy:
                # Use the first target for the current entry (returned at the
                # end of this next() call), and keep the others to generate
                # additional entries (returned the next next() calls).
                trans_type, name, res_id = targets.pop(0)
                for t, n, r in targets:
                    if t == trans_type == 'code': continue
                    self.extra_lines.append((t, n, r, source, trad, comments))

        if name is None:
            if not fuzzy:
                self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
                        self.cur_line(), source[:30])
            return self.next()
        return trans_type, name, res_id, source, trad, '\n'.join(comments)

    def write_infos(self, modules):
        """Write the PO header (the empty-msgid metadata entry)."""
        import openerp.release as release
        self.buffer.write("# Translation of %(project)s.\n" \
                          "# This file contains the translation of the following modules:\n" \
                          "%(modules)s" \
                          "#\n" \
                          "msgid \"\"\n" \
                          "msgstr \"\"\n" \
                          '''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
                          '''"Report-Msgid-Bugs-To: \\n"\n''' \
                          '''"POT-Creation-Date: %(now)s\\n"\n''' \
                          '''"PO-Revision-Date: %(now)s\\n"\n''' \
                          '''"Last-Translator: <>\\n"\n''' \
                          '''"Language-Team: \\n"\n''' \
                          '''"MIME-Version: 1.0\\n"\n''' \
                          '''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
                          '''"Content-Transfer-Encoding: \\n"\n''' \
                          '''"Plural-Forms: \\n"\n''' \
                          "\n"

                          % { 'project': release.description,
                              'version': release.version,
                              'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
                              'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
                            }
                          )

    def write(self, modules, tnrs, source, trad, comments=None):
        """Write one PO entry with its module, comment and reference lines."""
        plurial = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))

        if comments:
            self.buffer.write(''.join(('#. %s\n' % c for c in comments)))

        code = False
        for typy, name, res_id in tnrs:
            self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
            if typy == 'code':
                code = True

        if code:
            # only strings in python code are python formated
            self.buffer.write("#, python-format\n")

        if not isinstance(trad, unicode):
            trad = unicode(trad, 'utf8')
        if not isinstance(source, unicode):
            source = unicode(source, 'utf8')

        msg = "msgid %s\n" \
              "msgstr %s\n\n" \
                  % (quote(source), quote(trad))
        self.buffer.write(msg.encode('utf8'))
# Methods to export the translation file
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
    """Export translations for `modules` in language `lang` into `buffer`.

    `format` is one of 'csv', 'po' or 'tgz' (tgz packs one po/pot file per
    module). When `lang` is falsy a translation template (empty values) is
    produced.
    """

    def _process(format, modules, rows, buffer, lang):
        """Serialize pre-fetched translation rows in the requested format."""
        if format == 'csv':
            writer = csv.writer(buffer, 'UNIX')
            # write header first
            writer.writerow(("module","type","name","res_id","src","value"))
            for module, type, name, res_id, src, trad, comments in rows:
                # Comments are ignored by the CSV writer
                writer.writerow((module, type, name, res_id, src, trad))
        elif format == 'po':
            writer = TinyPoFile(buffer)
            writer.write_infos(modules)

            # we now group the translations by source. That means one translation per source.
            grouped_rows = {}
            for module, type, name, res_id, src, trad, comments in rows:
                row = grouped_rows.setdefault(src, {})
                row.setdefault('modules', set()).add(module)
                if not row.get('translation') and trad != src:
                    row['translation'] = trad
                row.setdefault('tnrs', []).append((type, name, res_id))
                row.setdefault('comments', set()).update(comments)

            for src, row in sorted(grouped_rows.items()):
                if not lang:
                    # translation template, so no translation value
                    row['translation'] = ''
                elif not row.get('translation'):
                    row['translation'] = src
                writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])

        elif format == 'tgz':
            # One <module>/i18n/<lang>.po (or .pot for templates) per module,
            # assembled in a temp dir and packed into a gzipped tar.
            rows_by_module = {}
            for row in rows:
                module = row[0]
                rows_by_module.setdefault(module, []).append(row)
            tmpdir = tempfile.mkdtemp()
            for mod, modrows in rows_by_module.items():
                tmpmoddir = join(tmpdir, mod, 'i18n')
                os.makedirs(tmpmoddir)
                pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
                buf = file(join(tmpmoddir, pofilename), 'w')
                _process('po', [mod], modrows, buf, lang)
                buf.close()

            tar = tarfile.open(fileobj=buffer, mode='w|gz')
            tar.add(tmpdir, '')
            tar.close()

        else:
            raise Exception(_('Unrecognized extension: must be one of '
                '.csv, .po, or .tgz (received .%s).' % format))

    translations = trans_generate(lang, modules, cr)
    modules = set(t[0] for t in translations)
    _process(format, modules, translations, buffer, lang)
    del translations
def trans_parse_xsl(de):
    """Return the unique translatable strings found in an XSL document."""
    terms = trans_parse_xsl_aux(de, False)
    return list(set(terms))
def trans_parse_xsl_aux(de, t):
    """Recursively collect translatable text/tail strings from an XSL tree.

    `t` marks whether a "t" attribute has been seen on an ancestor or an
    earlier sibling; once set it stays set for the remaining siblings.
    """
    res = []
    for node in de:
        # once a "t" attribute is seen, following siblings inherit it too
        t = t or node.get("t")
        if t:
            # skip comments/PIs and XSL-namespace elements entirely
            if isinstance(node, SKIPPED_ELEMENT_TYPES) or node.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
                continue
            for chunk in (node.text, node.tail):
                if chunk:
                    cleaned = chunk.strip().replace('\n', ' ')
                    if cleaned:
                        res.append(cleaned.encode("utf8"))
            res.extend(trans_parse_xsl_aux(node, t))
    return res
def trans_parse_rml(de):
    """Recursively collect translatable strings from an RML tree.

    Text is split on "[[ ... ]]" expression placeholders; only the literal
    fragments between placeholders are collected.
    """
    res = []
    for node in de:
        for child in node:
            if isinstance(child, SKIPPED_ELEMENT_TYPES) or not child.text:
                continue
            for fragment in re.split('\[\[.+?\]\]', child.text):
                cleaned = fragment.replace('\n', ' ').strip()
                if cleaned:
                    res.append(cleaned.encode("utf8"))
        res.extend(trans_parse_rml(node))
    return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
def trans_parse_view(element, callback):
    """Walk an etree document representing a regular view and invoke
    ``callback(term, source_line)`` for every translatable term found.

    :param ElementTree element: root of the etree document to scan
    :param callable callback: called for each extracted term
    """
    for node in element.iter():
        # element text is pushed only for real, non-skipped, translatable nodes
        text_wanted = (not isinstance(node, SKIPPED_ELEMENT_TYPES)
                       and node.tag.lower() not in SKIPPED_ELEMENTS
                       and node.get("translation", '').strip() != "off"
                       and node.text)
        if text_wanted:
            _push(callback, node.text, node.sourceline)
        # tail text and translatable attributes are pushed for every node
        if node.tail:
            _push(callback, node.tail, node.sourceline)
        for attr_name in ('string', 'help', 'sum', 'confirm', 'placeholder'):
            attr_value = node.get(attr_name)
            if attr_value:
                _push(callback, attr_value, node.sourceline)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
    """Return True when `object_name` belongs to one of `modules`.

    The module is derived from the technical name's first dotted segment,
    with a few base-module aliases ('ir', 'res', 'workflow' -> 'base').
    A `modules` list containing 'all' matches everything.
    """
    if 'all' in modules:
        return True
    aliases = {
        'ir': 'base',
        'res': 'base',
        'workflow': 'base',
    }
    prefix = object_name.split('.', 1)[0]
    return aliases.get(prefix, prefix) in modules
def _extract_translatable_qweb_terms(element, callback):
    """Walk an etree document representing a QWeb template and invoke
    ``callback(term, source_line)`` for every translatable term found.

    :param etree._Element element: root of the etree document to scan
    :param Callable callback: called for each extracted term
    """
    # not using elementTree.iterparse because we need to skip sub-trees in case
    # the ancestor element had a reason to be skipped
    for node in element:
        if isinstance(node, SKIPPED_ELEMENT_TYPES):
            continue
        translatable = (node.tag.lower() not in SKIPPED_ELEMENTS
                        and "t-js" not in node.attrib
                        and not ("t-jquery" in node.attrib and "t-operation" not in node.attrib)
                        and node.get("t-translation", '').strip() != "off")
        if translatable:
            _push(callback, node.text, node.sourceline)
            for attribute in ('title', 'alt', 'label', 'placeholder'):
                if attribute in node.attrib:
                    _push(callback, node.attrib[attribute], node.sourceline)
            # recurse only into non-skipped sub-trees
            _extract_translatable_qweb_terms(node, callback)
        # tail text belongs to the parent context and is always considered
        _push(callback, node.tail, node.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
    """Babel message extractor for qweb template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions (unused here)
    :param comment_tags: a list of translator tags to search for and
                         include in the results (unused here)
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: Iterable
    """
    messages = []
    tree = etree.parse(fileobj)
    _extract_translatable_qweb_terms(
        tree.getroot(),
        lambda text, lineno: messages.append((lineno, None, text, [])))
    return messages
def trans_generate(lang, modules, cr):
    """Collect every translatable term of `modules` and return them as a
    sorted list of tuples (module, type, name, res_id, source, translation,
    comments).

    Terms are gathered from database records (views, field labels/help,
    selections, reports, constraints) and from source files (.py, .mako,
    .js and QWeb .xml) via Babel extractors.  When `lang` is falsy, the
    translation column is left empty (template/POT export).
    """
    dbname = cr.dbname
    registry = openerp.registry(dbname)
    trans_obj = registry['ir.translation']
    model_data_obj = registry['ir.model.data']
    # NOTE(review): uid 1 is the superuser, presumably to bypass access rules
    uid = 1
    query = 'SELECT name, model, res_id, module' \
            ' FROM ir_model_data'
    query_models = """SELECT m.id, m.model, imd.module
            FROM ir_model AS m, ir_model_data AS imd
            WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    query_param = None
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module in %s'
        query_param = (tuple(modules),)
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'
    cr.execute(query, query_param)
    # set of (module, source, name, res_id, type, comments) tuples; a set so
    # duplicate terms are collected only once
    _to_translate = set()
    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        if not source or len(source.strip()) <= 1:
            return
        tnx = (module, source, name, id, type, tuple(comments or ()))
        _to_translate.add(tnx)
    def encode(s):
        # normalize unicode objects to utf-8 byte strings
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s
    def push(mod, type, name, res_id, term):
        term = (term or '').strip()
        # keep whitelisted small words, otherwise require at least 3 chars
        if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
            push_translation(mod, type, name, res_id, term)
    def get_root_view(xml_id):
        # walk up the view inheritance chain to the primary (root) view
        view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
        if view:
            while view.mode != 'primary':
                view = view.inherit_id
            xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
        return xml_id
    # --- terms attached to database records referenced by ir.model.data ---
    for (xml_name,model,res_id,module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))
        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue
        Model = registry[model]
        if not Model._translate:
            # explicitly disabled
            continue
        obj = Model.browse(cr, uid, res_id)
        if not obj.exists():
            _logger.warning("Unable to find object %r with id %d", model, res_id)
            continue
        if model=='ir.ui.view':
            d = etree.XML(encode(obj.arch))
            if obj.type == 'qweb':
                view_id = get_root_view(xml_name)
                push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
                _extract_translatable_qweb_terms(d, push_qweb)
            else:
                push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
                trans_parse_view(d, push_view)
        elif model=='ir.actions.wizard':
            pass # TODO Can model really be 'ir.actions.wizard' ?
        elif model=='ir.model.fields':
            try:
                field_name = encode(obj.name)
            except AttributeError, exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            objmodel = registry.get(obj.model)
            if (objmodel is None or field_name not in objmodel._columns
                    or not objmodel._translate):
                continue
            field_def = objmodel._columns[field_name]
            name = "%s,%s" % (encode(obj.model), field_name)
            push_translation(module, 'field', name, 0, encode(field_def.string))
            if field_def.help:
                push_translation(module, 'help', name, 0, encode(field_def.help))
            if field_def.translate:
                ids = objmodel.search(cr, uid, [])
                obj_values = objmodel.read(cr, uid, ids, [field_name])
                for obj_value in obj_values:
                    res_id = obj_value['id']
                    if obj.name in ('ir.model', 'ir.ui.menu'):
                        res_id = 0
                    model_data_ids = model_data_obj.search(cr, uid, [
                        ('model', '=', model),
                        ('res_id', '=', res_id),
                        ])
                    if not model_data_ids:
                        # records without xml id are exported under type 'model'
                        push_translation(module, 'model', name, 0, encode(obj_value[field_name]))
            if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
                for dummy, val in field_def.selection:
                    push_translation(module, 'selection', name, 0, encode(val))
        elif model=='ir.actions.report.xml':
            name = encode(obj.report_name)
            fname = ""
            if obj.report_rml:
                fname = obj.report_rml
                parse_func = trans_parse_rml
                report_type = "report"
            elif obj.report_xsl:
                fname = obj.report_xsl
                parse_func = trans_parse_xsl
                report_type = "xsl"
            if fname and obj.report_type in ('pdf', 'xsl'):
                try:
                    report_file = misc.file_open(fname)
                    try:
                        d = etree.parse(report_file)
                        for t in parse_func(d.iter()):
                            push_translation(module, report_type, name, 0, t)
                    finally:
                        report_file.close()
                except (IOError, etree.XMLSyntaxError):
                    _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
        # translatable field values of the record itself
        for field_name, field_def in obj._columns.items():
            if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
                # ignore model name if it is the technical one, nothing to translate
                continue
            if field_def.translate:
                name = model + "," + field_name
                try:
                    term = obj[field_name] or ''
                except:
                    term = ''
                push_translation(module, 'model', name, xml_name, encode(term))
    # End of data for ir.model.data query results
    cr.execute(query_models, query_param)
    def push_constraint_msg(module, term_type, model, msg):
        # callable constraint messages cannot be translated statically
        if not hasattr(msg, '__call__'):
            push_translation(encode(module), term_type, encode(model), 0, encode(msg))
    def push_local_constraints(module, model, cons_type='sql_constraints'):
        """Climb up the class hierarchy and ignore inherited constraints
        from other modules"""
        term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
        # message position differs: (name, sql, msg) vs (msg, check, fields)
        msg_pos = 2 if cons_type == 'sql_constraints' else 1
        for cls in model.__class__.__mro__:
            if getattr(cls, '_module', None) != module:
                continue
            constraints = getattr(cls, '_local_' + cons_type, [])
            for constraint in constraints:
                push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
    # --- constraint messages of each model ---
    for (_, model, module) in cr.fetchall():
        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue
        model_obj = registry[model]
        if model_obj._constraints:
            push_local_constraints(module, model_obj, 'constraints')
        if model_obj._sql_constraints:
            push_local_constraints(module, model_obj, 'sql_constraints')
    installed_modules = map(
        lambda m: m['name'],
        registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))
    path_list = [(path, True) for path in openerp.modules.module.ad_paths]
    # Also scan these non-addon paths
    for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
        path_list.append((os.path.join(config.config['root_path'], bin_path), True))
    # non-recursive scan for individual files in root directory but without
    # scanning subdirectories that may contain addons
    path_list.append((config.config['root_path'], False))
    _logger.debug("Scanning modules at paths: %s", path_list)
    def get_module_from_path(path):
        for (mp, rec) in path_list:
            if rec and path.startswith(mp) and os.path.dirname(path) != mp:
                path = path[len(mp)+1:]
                return path.split(os.path.sep)[0]
        return 'base' # files that are not in a module are considered as being in 'base' module
    def verified_module_filepaths(fname, path, root):
        # return (module, absolute, relative, display) for exportable files,
        # or a tuple of Nones when the file is outside the requested modules
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath)
        if ('all' in modules or module in modules) and module in installed_modules:
            if os.path.sep != '/':
                display_path = display_path.replace(os.path.sep, '/')
            return module, fabsolutepath, frelativepath, display_path
        return None, None, None, None
    def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
                               extra_comments=None, extract_keywords={'_': None}):
        module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
        extra_comments = extra_comments or []
        if not module: return
        src_file = open(fabsolutepath, 'r')
        try:
            for extracted in extract.extract(extract_method, src_file,
                                             keywords=extract_keywords):
                # Babel 0.9.6 yields lineno, message, comments
                # Babel 1.3 yields lineno, message, comments, context
                lineno, message, comments = extracted[:3]
                push_translation(module, trans_type, display_path, lineno,
                                 encode(message), comments + extra_comments)
        except Exception:
            _logger.exception("Failed to extract terms from %s", fabsolutepath)
        finally:
            src_file.close()
    # --- terms embedded in source files ---
    for (path, recursive) in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in osutil.walksymlinks(path):
            for fname in fnmatch.filter(files, '*.py'):
                babel_extract_terms(fname, path, root)
            # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
            for fname in fnmatch.filter(files, '*.mako'):
                babel_extract_terms(fname, path, root, 'mako', trans_type='report')
            # Javascript source files in the static/src/js directory, rest is ignored (libs)
            if fnmatch.fnmatch(root, '*/static/src/js*'):
                for fname in fnmatch.filter(files, '*.js'):
                    babel_extract_terms(fname, path, root, 'javascript',
                                        extra_comments=[WEB_TRANSLATION_COMMENT],
                                        extract_keywords={'_t': None, '_lt': None})
            # QWeb template files
            if fnmatch.fnmatch(root, '*/static/src/xml*'):
                for fname in fnmatch.filter(files, '*.xml'):
                    babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
                                        extra_comments=[WEB_TRANSLATION_COMMENT])
            if not recursive:
                # due to topdown, first iteration is in first level
                break
    out = []
    # translate strings marked as to be translated
    for module, source, name, id, type, comments in sorted(_to_translate):
        trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
        out.append((module, type, name, id, source, encode(trans) or '', comments))
    return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
    """Open a translation file and load its terms into ir_translation.

    The file format is derived from the filename extension.  Returns the
    result of trans_load_data(), or None when the file cannot be read.
    """
    try:
        fileobj = misc.file_open(filename)
        _logger.info("loading %s", filename)
        extension = os.path.splitext(filename)[-1][1:].lower()
        result = trans_load_data(cr, fileobj, extension, lang,
                                 verbose=verbose, module_name=module_name,
                                 context=context)
        fileobj.close()
        return result
    except IOError:
        if verbose:
            _logger.error("couldn't read translation file %s", filename)
        return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table.

    :param fileobj: open file-like object containing the translations
    :param fileformat: 'csv' or 'po'
    :param lang: target language code; created on the fly if missing
    :param module_name: module owning numeric res_id entries
    """
    if verbose:
        _logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    registry = openerp.registry(db_name)
    lang_obj = registry.get('res.lang')
    trans_obj = registry.get('ir.translation')
    iso_lang = misc.get_iso_codes(lang)
    try:
        ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
        # Parse also the POT: it will possibly provide additional targets.
        # (Because the POT comments are correct on Launchpad but not the
        # PO comments due to a Launchpad limitation. See LP bug 933496.)
        pot_reader = []
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                fields = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
            # Make a reader for the POT file and be somewhat defensive for the
            # stable branch.
            if fileobj.name.endswith('.po'):
                try:
                    # Normally the path looks like /path/to/xxx/i18n/lang.po
                    # and we try to find the corresponding
                    # /path/to/xxx/i18n/xxx.pot file.
                    # (Sometimes we have 'i18n_extra' instead of just 'i18n')
                    addons_module_i18n, _ = os.path.split(fileobj.name)
                    addons_module, i18n_dir = os.path.split(addons_module_i18n)
                    addons, module = os.path.split(addons_module)
                    pot_handle = misc.file_open(os.path.join(
                        addons, module, i18n_dir, module + '.pot'))
                    pot_reader = TinyPoFile(pot_handle)
                except:
                    # best-effort: missing POT simply means no extra targets
                    pass
        else:
            _logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))
        # Read the POT references, and keep them indexed by source string.
        class Target(object):
            def __init__(self):
                self.value = None
                self.targets = set()            # set of (type, name, res_id)
                self.comments = None
        pot_targets = defaultdict(Target)
        for type, name, res_id, src, _, comments in pot_reader:
            if type is not None:
                target = pot_targets[src]
                target.targets.add((type, name, res_id))
                target.comments = comments
        # read the rest of the file
        irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
        def process_row(row):
            """Process a single PO (or POT) entry."""
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ..., 'module':...}
            dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
                                 'comments', 'imd_model', 'imd_name', 'module'))
            dic['lang'] = lang
            dic.update(zip(fields, row))
            # discard the target from the POT targets.
            src = dic['src']
            if src in pot_targets:
                target = pot_targets[src]
                target.value = dic['value']
                target.targets.discard((dic['type'], dic['name'], dic['res_id']))
            # This would skip terms that fail to specify a res_id
            res_id = dic['res_id']
            if not res_id:
                return
            if isinstance(res_id, (int, long)) or \
                    (isinstance(res_id, basestring) and res_id.isdigit()):
                dic['res_id'] = int(res_id)
                dic['module'] = module_name
            else:
                # res_id is an xml id
                dic['res_id'] = None
                dic['imd_model'] = dic['name'].split(',')[0]
                if '.' in res_id:
                    dic['module'], dic['imd_name'] = res_id.split('.', 1)
                else:
                    dic['module'], dic['imd_name'] = False, res_id
            irt_cursor.push(dic)
        # First process the entries from the PO file (doing so also fills/removes
        # the entries from the POT file).
        for row in reader:
            process_row(row)
        # Then process the entries implied by the POT file (which is more
        # correct w.r.t. the targets) if some of them remain.
        pot_rows = []
        for src, target in pot_targets.iteritems():
            if target.value:
                for type, name, res_id in target.targets:
                    pot_rows.append((type, name, res_id, src, target.value, target.comments))
        pot_targets.clear()
        for row in pot_rows:
            process_row(row)
        irt_cursor.finish()
        trans_obj.clear_caches()
        if verbose:
            _logger.info("translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        _logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
    """Yield candidate locale names for `lang`, most specific first.

    Candidates are built with a utf8 encoding suffix, then with the
    platform's preferred encoding (plus a few legacy aliases), and the
    bare language code is always yielded last.
    """
    if lang is None:
        lang = locale.getdefaultlocale()[0]
    if os.name == 'nt':
        lang = _LOCALE2WIN32.get(lang, lang)

    def candidates(enc):
        # locale name for (lang, enc), plus its normalized spelling if distinct
        name = locale._build_localename((lang, enc))
        yield name
        normalized = locale.normalize(name)
        if normalized != name:
            yield normalized

    for candidate in candidates('utf8'):
        yield candidate
    prefenc = locale.getpreferredencoding()
    if prefenc:
        for candidate in candidates(prefenc):
            yield candidate
        # legacy aliases for some common preferred encodings
        prefenc = {
            'latin1': 'latin9',
            'iso-8859-1': 'iso8859-15',
            'cp1252': '1252',
        }.get(prefenc.lower())
        if prefenc:
            for candidate in candidates(prefenc):
                yield candidate
    yield lang
def resetlocale():
    """Set LC_ALL to the first candidate locale that the platform accepts.

    Returns the value of locale.setlocale() on success, None when every
    candidate fails.  Used instead of locale.resetlocale(), which is
    bugged with some locales.
    """
    for candidate in get_locales():
        try:
            return locale.setlocale(locale.LC_ALL, candidate)
        except locale.Error:
            continue
def load_language(cr, lang):
    """Loads a translation terms for a language.
    Used mainly to automate language loading at db initialization.

    :param lang: language ISO code with optional _underscore_ and l10n
        flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
    :type lang: str
    """
    registry = openerp.registry(cr.dbname)
    installer = registry['base.language.install']
    # the installer wizard record drives the actual loading
    wizard_id = installer.create(cr, SUPERUSER_ID, {'lang': lang})
    installer.lang_install(cr, SUPERUSER_ID, [wizard_id], context=None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| dariemp/odoo | openerp/tools/translate.py | Python | agpl-3.0 | 45,371 |
# -*- coding: utf-8 -*-
"""
tests.contrib
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the contrib modules.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
| pjknkda/werkzeug | tests/contrib/__init__.py | Python | bsd-3-clause | 207 |
"""
This module contains transformation functions (clip->clip)
One file for one fx. The file's name is the fx's name
"""
| kerimlcr/ab2017-dpyo | ornek/moviepy/moviepy-0.2.2.12/moviepy/video/fx/__init__.py | Python | gpl-3.0 | 121 |
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Display the details of a record on which some operation is to be carried
out and prompt for the user's confirmation that it is the correct record.
Upon the clicking of the confirmation button, augment step by one.
"""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_ADMIN_EMAIL
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
from invenio.search_engine import print_record, record_exists
## Details of record to display to the user for confirmation:
CFG_DOCUMENT_DETAILS_MESSAGE = """
<div>
We're about to process your request for the following document:<br /><br />
<table border="0">
<tr>
<td>Report Number(s):</td><td>%(report-numbers)s</td>
</tr>
<tr>
<td>Title:</td><td>%(title)s</td>
</tr>
<tr>
<td>Author(s):</td><td>%(author)s</td>
</tr>
</table>
<br />
If this is correct, please CONFIRM it:<br />
<br />
<input type="submit" width="350" height="50"
name="CONFIRM" value="CONFIRM"
onClick="document.forms[0].step.value=%(newstep)s;">
<br />
If you think that there is a problem, please contact
<a href="mailto:%(admin-email)s">%(admin-email)s</a>.<br />
</div>
"""
def Ask_For_Record_Details_Confirmation(parameters, \
                                        curdir, \
                                        form, \
                                        user_info=None):
    """
    Display the details of a record on which some operation is to be carried
    out and prompt for the user's confirmation that it is the correct record.
    Upon the clicking of the confirmation button, augment step by one.

    Given the "recid" (001) of a record, retrieve the basic metadata
    (title, report-number(s) and author(s)) and display them in the
    user's browser along with a prompt asking them to confirm that
    it is indeed the record that they expected to see.

    The function depends upon the presence of the "sysno" global and the
    presence of the "step" field in the "form" parameter.
    When the user clicks on the "confirm" button, step will be augmented by
    1 and the form will be submitted.
    @parameters: None.
    @return: None.
    @Exceptions raise: InvenioWebSubmitFunctionError if problems are
     encountered;
     InvenioWebSubmitFunctionStop in order to display the details of the
     record and the confirmation message.
    """
    global sysno
    ## Make sure that we know the current step:
    try:
        current_step = int(form['step'])
    except (KeyError, TypeError, ValueError):
        ## BUG FIX: only TypeError was caught before, so a missing 'step'
        ## field (KeyError) or a non-numeric value (ValueError raised by
        ## int()) escaped as an unhandled exception instead of this error.
        msg = "Unable to determine submission step. Cannot continue."
        raise InvenioWebSubmitFunctionError(msg)
    else:
        newstep = current_step + 1
    ## Make sure that the sysno is valid:
    try:
        working_recid = int(sysno)
    except (TypeError, ValueError):
        ## BUG FIX: int() raises ValueError for malformed strings; the
        ## original "except TypeError" let that propagate uncaught.
        ## Unable to find the details of this record - cannot query the database
        msg = "Unable to retrieve details of record - record id was invalid."
        raise InvenioWebSubmitFunctionError(msg)
    if not record_exists(working_recid):
        ## Record doesn't exist.
        msg = "Unable to retrieve details of record [%s] - record does not " \
              "exist." % working_recid
        raise InvenioWebSubmitFunctionError(msg)
    ## Retrieve the details to be displayed:
    ##
    ## Author(s):
    rec_authors = ""
    rec_first_author = print_record(int(sysno), 'tm', "100__a")
    rec_other_authors = print_record(int(sysno), 'tm', "700__a")
    if rec_first_author != "":
        rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
                                author in rec_first_author.split("\n")])
    if rec_other_authors != "":
        rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
                                author in rec_other_authors.split("\n")])
    ## Title:
    rec_title = "".join(["%s<br />\n" % cgi.escape(title.strip()) for title in \
                         print_record(int(sysno), 'tm', "245__a").split("\n")])
    ## Report numbers:
    rec_reportnums = ""
    rec_reportnum = print_record(int(sysno), 'tm', "037__a")
    rec_other_reportnums = print_record(int(sysno), 'tm', "088__a")
    if rec_reportnum != "":
        rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
                                   for repnum in rec_reportnum.split("\n")])
    if rec_other_reportnums != "":
        rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
                                   for repnum in \
                                   rec_other_reportnums.split("\n")])
    ## Raising this exception displays the confirmation page and halts the
    ## submission until the user clicks CONFIRM (which bumps 'step').
    raise InvenioWebSubmitFunctionStop(CFG_DOCUMENT_DETAILS_MESSAGE % \
                                       { 'report-numbers' : rec_reportnums, \
                                         'title' : rec_title, \
                                         'author' : rec_authors, \
                                         'newstep' : newstep, \
                                         'admin-email' : CFG_SITE_ADMIN_EMAIL, \
                                       } )
| NikolaYolov/invenio_backup | modules/websubmit/lib/functions/Ask_For_Record_Details_Confirmation.py | Python | gpl-2.0 | 5,952 |
import sys
from remoteserver import DirectResultRemoteServer
class SpecialErrors(object):
    """Remote-library keywords producing failures with special flags set."""

    def continuable(self, message, traceback):
        """Fail with the continuable flag set to True."""
        return self._special_error(message, traceback, continuable=True)

    def fatal(self, message, traceback):
        """Fail with a truthy fatal value and a non-boolean continuable."""
        return self._special_error(message, traceback,
                                   fatal='this wins', continuable=42)

    def _special_error(self, message, traceback, continuable=False, fatal=False):
        """Build the FAIL result dict carrying the special-error flags."""
        result = {'status': 'FAIL', 'error': message, 'traceback': traceback}
        result['continuable'] = continuable
        result['fatal'] = fatal
        return result
# Script entry point: serve the SpecialErrors keywords over the remote
# protocol, forwarding any command-line arguments to the server
# (presumably host/port — see remoteserver.DirectResultRemoteServer).
if __name__ == '__main__':
    DirectResultRemoteServer(SpecialErrors(), *sys.argv[1:])
| yahman72/robotframework | atest/testdata/standard_libraries/remote/specialerrors.py | Python | apache-2.0 | 689 |
import io
import json
class Stream(object):
    """
    Base class that should be inherited when implementing different
    stream types. Should only be created by plugins.
    """

    __shortname__ = "stream"

    def __init__(self, session):
        self.session = session

    def __repr__(self):
        return "<Stream()>"

    def __json__(self):
        # JSON-serializable description of this stream
        return dict(type=type(self).shortname())

    def open(self):
        """
        Attempts to open a connection to the stream.
        Returns a file-like object that can be used to read the stream data.

        Raises :exc:`StreamError` on failure.
        """
        raise NotImplementedError

    @property
    def json(self):
        # the __json__() dict rendered as a JSON string
        return json.dumps(self.__json__())

    @classmethod
    def shortname(cls):
        return cls.__shortname__
class StreamIO(io.IOBase):
    # Base for file-like objects returned by Stream.open(); currently adds
    # nothing beyond io.IOBase.
    pass
__all__ = ["Stream", "StreamIO"]
| ethanhlc/streamlink | src/streamlink/stream/stream.py | Python | bsd-2-clause | 915 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_dns_cache_resolver
short_description: Manage DNS resolver cache configurations on BIG-IP
description:
- Manage DNS resolver cache configurations on BIG-IP.
version_added: 2.8
options:
name:
description:
- Specifies the name of the cache.
type: str
required: True
answer_default_zones:
description:
- Specifies whether the system answers DNS queries for the default
zones localhost, reverse 127.0.0.1 and ::1, and AS112.
- When creating a new cache resolver, if this parameter is not specified, the
default is C(no).
type: bool
forward_zones:
description:
- Forward zones associated with the cache.
- To remove all forward zones, specify a value of C(none).
suboptions:
name:
description:
- Specifies a FQDN for the forward zone.
type: str
required: True
nameservers:
description:
- Specifies the IP address and service port of a recursive
nameserver that answers DNS queries for the zone when the
response cannot be found in the DNS cache.
suboptions:
address:
description:
- Address of recursive nameserver.
type: str
port:
description:
- Port of recursive nameserver.
- When specifying new nameservers, if this value is not provided, the
default is C(53).
type: int
type: list
type: raw
route_domain:
description:
- Specifies the route domain the resolver uses for outbound traffic.
type: str
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a DNS resolver cache
  bigip_dns_cache_resolver:
name: foo
answer_default_zones: yes
forward_zones:
- name: foo.bar.com
nameservers:
- address: 1.2.3.4
port: 53
- address: 5.6.7.8
route_domain: 0
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: str
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Shared mapping between module options and BIG-IP REST attributes."""
    # REST API attribute name -> module option name.
    api_map = {
        'routeDomain': 'route_domain',
        'answerDefaultZones': 'answer_default_zones',
        'forwardZones': 'forward_zones',
    }
    # Attributes sent to the device when creating/updating.
    api_attributes = [
        'routeDomain',
        'answerDefaultZones',
        'forwardZones',
    ]
    # Values reported back to the user when something changed.
    returnables = [
        'route_domain',
        'answer_default_zones',
        'forward_zones',
    ]
    # Values compared to decide whether an update is required.
    updatables = [
        'route_domain',
        'answer_default_zones',
        'forward_zones',
    ]
    @property
    def route_domain(self):
        # Qualify the route domain with the partition (e.g. '0' -> '/Common/0').
        domain = self._values['route_domain']
        return None if domain is None else fq_name(self.partition, domain)
    @property
    def answer_default_zones(self):
        # Normalize boolean-ish user input via the shared F5 helper.
        return flatten_boolean(self._values['answer_default_zones'])
class ApiParameters(Parameters):
    """Parameters as read from the device's REST API."""
    @property
    def forward_zones(self):
        # Normalize the device's zone list (sorted nameserver names per zone)
        # so it can be compared against ModuleParameters.forward_zones.
        zones = self._values['forward_zones']
        if zones is None:
            return None
        normalized = []
        for zone in zones:
            entry = dict(name=zone['name'], nameservers=[])
            if 'nameservers' in zone:
                entry['nameservers'] = sorted(ns['name'] for ns in zone['nameservers'])
            normalized.append(entry)
        return normalized
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the module."""
    @property
    def forward_zones(self):
        # Normalize user input to a list of dicts with sorted 'addr:port'
        # nameserver strings. '' or 'none' is a sentinel meaning "clear zones".
        zones = self._values['forward_zones']
        if zones is None:
            return None
        if zones in ['', 'none']:
            return ''
        normalized = []
        for zone in zones:
            if 'name' not in zone:
                raise F5ModuleError(
                    "A 'name' key must be provided when specifying a list of forward zones."
                )
            servers = []
            if 'nameservers' in zone:
                for ns in zone['nameservers']:
                    if 'address' not in ns:
                        raise F5ModuleError(
                            "An 'address' key must be provided when specifying a list of forward zone nameservers."
                        )
                    # Port defaults to the standard DNS port when omitted.
                    servers.append('{0}:{1}'.format(ns['address'], ns.get('port', 53)))
            servers.sort()
            normalized.append(dict(name=zone['name'], nameservers=servers))
        return normalized
class Changes(Parameters):
    """Base class for reporting changed values back to the user."""
    def to_return(self):
        # Collect every returnable value; on any error, report whatever was
        # gathered so far (standard best-effort behavior in F5 modules).
        collected = {}
        try:
            for key in self.returnables:
                collected[key] = getattr(self, key)
            collected = self._filter_params(collected)
        except Exception:
            pass
        return collected
class UsableChanges(Changes):
    """Changes shaped for sending to the device's REST API."""
    @property
    def forward_zones(self):
        # Translate normalized 'addr:port' strings back into the REST payload
        # shape: [{'name': zone, 'nameservers': [{'name': 'addr:port'}, ...]}].
        zones = self._values['forward_zones']
        if zones is None:
            return None
        payload = []
        for zone in zones:
            item = {'name': zone['name']}
            if 'nameservers' in zone:
                item['nameservers'] = [dict(name=server) for server in zone['nameservers']]
            payload.append(item)
        return payload
class ReportableChanges(Changes):
    # No API-to-user translation is needed beyond what Changes provides.
    pass
class Difference(object):
    """Computes what (if anything) differs between desired and current state."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Prefer a parameter-specific comparison property when one is defined;
        # otherwise fall back to a simple inequality check.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Generic rule: report the wanted value only when it differs from the
        # current one (or when the current state has no such attribute).
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if wanted != current:
                return wanted
        except AttributeError:
            return wanted
    @property
    def forward_zones(self):
        wanted = self.want.forward_zones
        if wanted is None:
            return None
        current = self.have.forward_zones
        if wanted in ['', 'none']:
            # Request to clear the zones: only a change if some exist now.
            return None if current is None else []
        if current is None:
            return dict(forward_zones=wanted)
        # Compare order-insensitively: sort zones by name, nameservers as sets.
        want_sorted = sorted(wanted, key=lambda z: z['name'])
        have_sorted = sorted(current, key=lambda z: z['name'])
        if set(z['name'] for z in want_sorted) != set(z['name'] for z in have_sorted):
            return dict(forward_zones=wanted)
        for idx, w_zone in enumerate(want_sorted):
            w_servers = w_zone.get('nameservers', [])
            h_servers = have_sorted[idx].get('nameservers', [])
            if set(w_servers) != set(h_servers):
                return dict(forward_zones=wanted)
class ModuleManager(object):
    """Orchestrates the module run: reads desired state, queries the device
    over REST, computes differences, and applies create/update/delete.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # Desired state from user input vs. current device state (read lazily
        # in update()); self.changes accumulates what will be sent/reported.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # Create path: every non-None desired value is treated as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # Update path: compare each updatable; returns True if anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Dict results carry their own key(s); scalars use the param name.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point; returns the result dict for AnsibleModule.exit_json()."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        # Translate accumulated changes into user-facing report values.
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface queued deprecation warnings through Ansible's mechanism.
        # NOTE(review): relies on F5RestClient exposing `.module` — confirm.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        # Ensure the resolver exists, creating or updating as necessary.
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        # GET the resource; a 404 (HTTP status or JSON body code) means absent.
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Unparseable body is treated as "does not exist".
            return False
        # `and` binds tighter than `or`: HTTP 404, or body code == 404.
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify deletion actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def create_on_device(self):
        # POST the new resolver; name/partition identify it within the API.
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # Raise on client errors reported in the JSON body.
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update_on_device(self):
        # PATCH only the changed attributes onto the existing resolver.
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def absent(self):
        # Ensure the resolver is gone; only report a change if it existed.
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        # Fetch the resolver's current configuration as ApiParameters.
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Defines the module's accepted arguments and check-mode support."""
    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options; merged with the common F5 provider options.
        spec = dict(
            name=dict(required=True),
            route_domain=dict(),
            answer_default_zones=dict(type='bool'),
            forward_zones=dict(
                # 'raw' so the empty-string/'none' clearing sentinel is accepted.
                type='raw',
                options=dict(
                    name=dict(),
                    nameservers=dict(
                        type='list',
                        elements='dict',
                        options=dict(
                            address=dict(),
                            port=dict(type='int')
                        )
                    )
                )
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        results = ModuleManager(module=module).exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Convert module-level errors into a structured Ansible failure.
        module.fail_json(msg=str(ex))
# Only execute when run as a script (Ansible invokes modules this way).
if __name__ == '__main__':
    main()
# Source: helldorado/ansible — lib/ansible/modules/network/f5/bigip_dns_cache_resolver.py (Python, GPL-3.0)