text stringlengths 4 1.02M | meta dict |
|---|---|
import argparse
import os
import sys
import npdocstring
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog="npdocstring",
        description=(
            "generate missing numpy docstrings automatically "
            "in Python source files."
        ),
    )
    parser.add_argument(
        "--input",
        "-i",
        help="path to input file (stdin if not provided)",
    )
    parser.add_argument(
        "--dir",
        "-d",
        help="directory where to apply recursively.",
    )
    parser.add_argument(
        "--indentation-spaces",
        help="how many indentation spaces are used",
        default=4,
        type=int,
    )
    flags = parser.parse_args()
    if flags.dir is None:
        # Single-file mode: read from --input (or stdin) and write the
        # processed source to stdout.
        if flags.input is not None:
            if not os.path.isfile(flags.input):
                raise FileNotFoundError(flags.input)
            with open(flags.input, "r") as f:
                file_content = f.read()
        else:
            file_content = sys.stdin.read()
        new_file_content = npdocstring.process_file(
            file_content, flags.indentation_spaces
        )
        sys.stdout.write(new_file_content)
    else:
        # Directory mode: rewrite every .py file in place, keeping a
        # "<path>--" backup of the original contents next to it.
        if not os.path.isdir(flags.dir):
            print("npdocstring: unknown directory", flags.dir)
        else:
            for root, dirs, files in os.walk(flags.dir):
                for file in files:
                    if not file.endswith(".py"):
                        continue
                    # BUG FIX: os.walk already yields `root` as the directory
                    # containing `files`; the previous code joined `root` with
                    # *every* name in `dirs` as well, producing a nonexistent
                    # path whenever the current directory had subdirectories.
                    path = os.path.join(root, file)
                    # Use a context manager so the handle is closed (the old
                    # bare open(path).read() leaked it).
                    with open(path, "r") as f:
                        file_content = f.read()
                    new_file_content = npdocstring.process_file(
                        file_content, flags.indentation_spaces
                    )
                    with open(path + "--", "w") as f:
                        f.write(file_content)
                    with open(path, "w") as f:
                        f.write(new_file_content)
                    print(f"processed {path}")
| {
"content_hash": "7dc0a3164347a0fd03fac21503a1075b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 70,
"avg_line_length": 33.18032786885246,
"alnum_prop": 0.4925889328063241,
"repo_name": "tgy/npdocstring",
"id": "8a94d321aa40802525abac9c3858f93e4c7b5aaf",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "npdocstring/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21433"
}
],
"symlink_target": ""
} |
"""Cudnn RNN operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.checkpoint.python import split_dependency
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking as trackable_lib
# Direction strings accepted by the Cudnn RNN ops.
CUDNN_RNN_UNIDIRECTION = "unidirectional"
CUDNN_RNN_BIDIRECTION = "bidirectional"
# Supported rnn_mode strings (cell types).
CUDNN_LSTM = "lstm"
CUDNN_GRU = "gru"
CUDNN_RNN_RELU = "rnn_relu"
CUDNN_RNN_TANH = "rnn_tanh"
# Number of canonical weight (and bias) tensors Cudnn stores per layer for
# each cell type.
# Half for cell input, half for hidden states.
CUDNN_LSTM_PARAMS_PER_LAYER = 8
CUDNN_GRU_PARAMS_PER_LAYER = 6
CUDNN_RNN_TANH_PARAMS_PER_LAYER = 2
CUDNN_RNN_RELU_PARAMS_PER_LAYER = 2
# input_mode strings: whether a linear projection is applied between the
# input and the first layer's computation.
CUDNN_INPUT_LINEAR_MODE = "linear_input"
CUDNN_INPUT_SKIP_MODE = "skip_input"
CUDNN_INPUT_AUTO_MODE = "auto_select"
# Reuse the canonical TF RNN variable names so converted params line up with
# the platform-independent cells.
# pylint:disable=protected-access
_BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME
_WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME
# pylint:enable=protected-access
class CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):
  """Cudnn Compatible LSTMCell.

  A thin wrapper over `tf.contrib.rnn.LSTMBlockCell`, meant to be used
  together with `tf.contrib.cudnn_rnn.CudnnLSTM`: the params of the latter
  can be consumed by this cell without any conversion by the caller.
  """

  def __init__(self, num_units, reuse=None):
    # Fix the block-cell knobs (no forget bias, no clipping, no peepholes)
    # so the variable layout matches what the Cudnn converter emits.
    super(CudnnCompatibleLSTMCell, self).__init__(
        num_units,
        forget_bias=0,
        cell_clip=None,
        use_peephole=False,
        reuse=reuse,
        name="cudnn_compatible_lstm_cell")
    self._names.update({"scope": "cudnn_compatible_lstm_cell"})
class CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):
  r"""Cudnn Compatible GRUCell.

  A GRU impl akin to `tf.nn.rnn_cell.GRUCell` to use along with
  `tf.contrib.cudnn_rnn.CudnnGRU`. The latter's params can be used by
  it seamlessly.

  It differs from platform-independent GRUs in how the new memory gate is
  calculated. Nvidia picks this variant based on GRU author's[1] suggestion and
  the fact it has no accuracy impact[2].

  [1] https://arxiv.org/abs/1406.1078
  [2] http://svail.github.io/diff_graphs/

  Cudnn compatible GRU (from Cudnn library user guide):
  ```python
  # reset gate
  $$r_t = \sigma(x_t * W_r + h_t-1 * R_h + b_{Wr} + b_{Rr})$$
  # update gate
  $$u_t = \sigma(x_t * W_u + h_t-1 * R_u + b_{Wu} + b_{Ru})$$
  # new memory gate
  $$h'_t = tanh(x_t * W_h + r_t .* (h_t-1 * R_h + b_{Rh}) + b_{Wh})$$
  $$h_t = (1 - u_t) .* h'_t + u_t .* h_t-1$$
  ```

  Other GRU (see `tf.nn.rnn_cell.GRUCell` and `tf.contrib.rnn.GRUBlockCell`):
  ```python
  # new memory gate
  \\(h'_t = tanh(x_t * W_h + (r_t .* h_t-1) * R_h + b_{Wh})\\)
  ```
  which is not equivalent to Cudnn GRU: in addition to the extra bias term b_Rh,
  ```python
  \\(r .* (h * R) != (r .* h) * R\\)
  ```
  """

  def __init__(self, num_units, reuse=None, kernel_initializer=None):
    super(CudnnCompatibleGRUCell, self).__init__(
        num_units,
        activation=None,
        reuse=reuse,
        kernel_initializer=kernel_initializer)

  def build(self, inputs_shape):
    """Creates variables in the layout the Cudnn GRU params map onto.

    Unlike the stock GRUCell, the candidate ("new memory") gate keeps
    separate input and hidden projections, each with its own bias, so the
    extra Cudnn bias term b_{Rh} (see class docstring) can be represented.

    Raises:
      ValueError: if the last dimension of `inputs_shape` is unknown.
    """
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)
    input_depth = inputs_shape[1].value
    # Fused kernel/bias for the reset and update gates.
    self._gate_kernel = self.add_variable(
        "gates/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, 2 * self._num_units],
        initializer=self._kernel_initializer)
    self._gate_bias = self.add_variable(
        "gates/%s" % _BIAS_VARIABLE_NAME,
        shape=[2 * self._num_units],
        # Gate biases default to 1.0 when no bias initializer is given.
        initializer=(
            self._bias_initializer
            if self._bias_initializer is not None
            else init_ops.constant_initializer(1.0, dtype=self.dtype)))
    self._candidate_input_kernel = self.add_variable(
        "candidate/input_projection/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth, self._num_units],
        initializer=self._kernel_initializer)
    self._candidate_hidden_kernel = self.add_variable(
        "candidate/hidden_projection/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[self._num_units, self._num_units],
        initializer=self._kernel_initializer)
    self._candidate_input_bias = self.add_variable(
        "candidate/input_projection/%s" % _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=(
            self._bias_initializer
            if self._bias_initializer is not None
            else init_ops.zeros_initializer(dtype=self.dtype)))
    self._candidate_hidden_bias = self.add_variable(
        "candidate/hidden_projection/%s" % _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=(
            self._bias_initializer
            if self._bias_initializer is not None
            else init_ops.zeros_initializer(dtype=self.dtype)))

  def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    # Reset (r) and update (u) gates from one fused matmul + sigmoid.
    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, state], 1), self._gate_kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)
    value = math_ops.sigmoid(gate_inputs)
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    # Cudnn-style candidate: tanh(x*W_h + r .* (h*R_h + b_Rh) + b_Wh) —
    # note r multiplies the *projected* hidden state (see class docstring).
    candidate = nn_ops.bias_add(
        math_ops.matmul(inputs, self._candidate_input_kernel),
        self._candidate_input_bias)
    candidate += r * nn_ops.bias_add(
        math_ops.matmul(state, self._candidate_hidden_kernel),
        self._candidate_hidden_bias)
    candidate = self._activation(candidate)
    # Convex blend of candidate and previous state; GRU output == new state.
    new_h = (1-u) * candidate + u * state
    return new_h, new_h
class CudnnParamsFormatConverter(object):
  """Abstract class that converts between params of Cudnn Rnn and TF Rnn.

  Subclasses must define `_rnn_mode`, `_num_params_per_layer` and the
  abstract `_cudnn_to_tf_*` / `_tf_to_cudnn_*` methods below.
  """

  def __init__(self,
               num_layers,
               num_units,
               input_size,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION):
    """Constructor.

    Args:
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
        num_units.
      input_mode: indicate whether there is a linear projection between the
        input and the actual computation before the first layer. It could be one
        of 'linear_input', 'skip_input' or 'auto_select'. * 'linear_input'
        (default) always applies a linear projection of input onto RNN hidden
        state. (standard RNN behavior). * 'skip_input' is only allowed when
        input_size == num_units; * 'auto_select' implies 'skip_input' when
        input_size == num_units; otherwise, it implies 'linear_input'.
      direction: the direction model that the model operates. Could be either
        'unidirectional' or 'bidirectional'
    """
    self._num_layers = num_layers
    self._input_size = input_size
    self._num_units = num_units
    self._input_mode = input_mode
    self._direction = direction
    self._num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
    # `_num_params_per_layer` comes from the concrete subclass (e.g. 8 for
    # LSTM, 6 for GRU; see the CUDNN_*_PARAMS_PER_LAYER constants).
    self._num_params = (
        self._num_params_per_layer * self._num_layers * self._num_dirs)

  def tf_canonical_to_opaque(self, tf_canonicals):
    r"""Converts tf canonical weights to cudnn opaque param."""
    cu_weights, cu_biases = self._tf_canonical_to_cu_canonical(tf_canonicals)
    # Flatten each weight before packing into the opaque buffer.
    cu_weights = [array_ops.reshape(w, [-1]) for w in cu_weights]
    opaque_params = self._cu_canonical_to_opaque(cu_weights, cu_biases)
    return opaque_params

  def opaque_to_tf_canonical(self, opaque_param):
    r"""Converts cudnn opaque param to tf canonical weights."""
    cu_weights, cu_biases = self._opaque_to_cu_canonical(opaque_param)
    weights, biases = self._cu_canonical_to_tf_canonical(cu_weights, cu_biases)
    return weights, biases

  def _opaque_to_cu_canonical(self, opaque_param):
    """Converts opaque params to Cudnn canonical format.

    Args:
      opaque_param: An opaque tensor storing cudnn rnn params (weights and
        biases).
    Returns:
      2 list for weights and biases respectively.
    """
    # Pinned to GPU: the cudnn_rnn kernels are GPU ops.
    with ops.device("/gpu:0"):
      weights, biases = gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical(
          num_layers=self._num_layers,
          num_units=self._num_units,
          input_size=self._input_size,
          params=opaque_param,
          num_params=self._num_params,
          rnn_mode=self._rnn_mode,
          input_mode=self._input_mode,
          direction=self._direction)
      return (weights, biases)

  def _cu_canonical_to_opaque(self, cu_weights, cu_biases):
    """Converts from Cudnn canonical format to opaque params.

    Args:
      cu_weights: a list of tensors, Cudnn canonical weights.
      cu_biases: a list of tensors, Cudnn canonical biases.
    Returns:
      a single opaque tensor.
    """
    with ops.device("/gpu:0"):
      return gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params(
          num_layers=self._num_layers,
          num_units=self._num_units,
          input_size=self._input_size,
          weights=cu_weights,
          biases=cu_biases,
          rnn_mode=self._rnn_mode,
          input_mode=self._input_mode,
          direction=self._direction)

  def _cu_canonical_to_tf_canonical(self, cu_weights, cu_biases):
    r"""Transform from Cudnn canonical to tf canonical.

    The elements of argument lists are laid out in the following format:
        ------------------------------------------------------------
        | weights                    | biases                      |
        ------------------------------------------------------------
        \                             \
         \                             \
          -------------------------------
          | layer1     |layer2     |... |
          -------------------------------
          \             \
           ---------------
           |fwd   |bak   |
           ---------------
    Args:
      cu_weights: a list of tensors of Cudnn canonical weights.
      cu_biases: a list of tensors of Cudnn canonical biases.
    Returns:
      1 tuple, tf canonical weights and biases.
    """
    tf_weights, tf_biases = [], []
    layer_weights_num = self._num_params_per_layer * self._num_dirs
    layer_biases_num = layer_weights_num
    for i in range(self._num_layers):
      # Slice out this layer's chunk of weights/biases.
      layer_weights = cu_weights[i * layer_weights_num:(i + 1) *
                                 layer_weights_num]
      layer_biases = cu_biases[i * layer_biases_num:(i + 1) * layer_biases_num]
      if self._direction == CUDNN_RNN_UNIDIRECTION:
        self._cu_canonical_to_tf_canonical_single_layer(
            layer_weights, layer_biases, tf_weights, tf_biases)
      else:
        # Bidirectional: first half of a layer's params is the forward
        # direction, second half is backward (see diagram above).
        fw_weights = layer_weights[:len(layer_weights) // 2]
        bw_weights = layer_weights[len(layer_weights) // 2:]
        fw_biases = layer_biases[:len(layer_biases) // 2]
        bw_biases = layer_biases[len(layer_biases) // 2:]
        self._cu_canonical_to_tf_canonical_single_layer(
            fw_weights,
            fw_biases,
            tf_weights,
            tf_biases,
        )
        self._cu_canonical_to_tf_canonical_single_layer(
            bw_weights,
            bw_biases,
            tf_weights,
            tf_biases,
        )
    return (tf_weights, tf_biases)

  def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,
                                                 tf_weights, tf_biases):
    r"""Transform single layer Cudnn canonicals to tf canonicals.

    The elements of cu_weights, cu_biases are laid out in the following format:
    -------------------------------------------------------------------------
    | gate0 param on inputs | gate0 param on hidden state | gate1 ..........|
    -------------------------------------------------------------------------
    Args:
      cu_weights: a list of tensors, single layer weights.
      cu_biases: a list of tensors, single layer biases.
      tf_weights: a list where transformed weights are stored.
      tf_biases: a list where transformed biases are stored.
    """
    raise NotImplementedError("Abstract method")

  def _tf_canonical_to_cu_canonical(self, tf_canonicals):
    r"""Transform from tf canonical to Cudnn canonical.

    This is the reverse routine of _TransformCanonical().
    Args:
      tf_canonicals: a list of tensors of tf canonical params. The elements are
        laid out in the following format:
        ------------------------------------------------------------
        | weights                    | biases                      |
        ------------------------------------------------------------
        \                             \
         \                             \
          -------------------------------
          | layer1     |layer2     |... |
          -------------------------------
          \             \
           ---------------
           |fwd   |bak   |
           ---------------
    Returns:
      2 lists: the recovered cudnn canonical weights and biases.
    """
    # First half of the flat list is all weights, second half all biases.
    weights = tf_canonicals[:len(tf_canonicals) // 2]
    biases = tf_canonicals[len(tf_canonicals) // 2:]
    cu_weights, cu_biases = [], []
    layer_weights_num = len(weights) // self._num_layers
    layer_biases_num = len(biases) // self._num_layers
    for i in range(self._num_layers):
      layer_weights = weights[i * layer_weights_num:(i + 1) * layer_weights_num]
      layer_biases = biases[i * layer_biases_num:(i + 1) * layer_biases_num]
      if self._direction == CUDNN_RNN_UNIDIRECTION:
        cu_weights.extend(self._tf_to_cudnn_weights(i, *layer_weights))
        cu_biases.extend(self._tf_to_cudnn_biases(*layer_biases))
      else:
        # Forward-direction params precede backward-direction params.
        fw_weights, bw_weights = layer_weights[:len(
            layer_weights) // 2], layer_weights[len(layer_weights) // 2:]
        fw_biases, bw_biases = layer_biases[:len(
            layer_biases) // 2], layer_biases[len(layer_biases) // 2:]
        cu_weights.extend(self._tf_to_cudnn_weights(i, *fw_weights))
        cu_biases.extend(self._tf_to_cudnn_biases(*fw_biases))
        cu_weights.extend(self._tf_to_cudnn_weights(i, *bw_weights))
        cu_biases.extend(self._tf_to_cudnn_biases(*bw_biases))
    return cu_weights, cu_biases

  def _cudnn_to_tf_weights(self, *cu_weights):
    r"""Stitches cudnn canonical weights to generate tf canonical weights."""
    raise NotImplementedError("Abstract method")

  def _tf_to_cudnn_weights(self, layer, *tf_weights):
    r"""Reverses the operations in StitchWeights()."""
    raise NotImplementedError("Abstract method")

  def _cudnn_to_tf_biases(self, *biases):
    r"""Stitches cudnn canonical biases to generate tf canonical biases."""
    raise NotImplementedError("Abstract method")

  def _tf_to_cudnn_biases(self, *tf_biases):
    r"""Reverses the operations in StitchBiases()."""
    raise NotImplementedError("Abstract method")
class CudnnParamsFormatConverterLSTM(CudnnParamsFormatConverter):
  """Helper class that converts between params of Cudnn and TF LSTM."""

  _rnn_mode = CUDNN_LSTM
  _num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER

  def _cudnn_to_tf_gate_params(self, *cu_gate_order):
    # Reorder gates ifco (Cudnn) -> icfo (TF); see comment in
    # _cudnn_to_tf_weights below.
    i_g, f_g, c_g, o_g = cu_gate_order
    return [i_g, c_g, f_g, o_g]

  def _tf_to_cudnn_gate_params(self, *tf_gate_order):
    # Inverse of _cudnn_to_tf_gate_params: icfo (TF) -> ifco (Cudnn).
    i_g, c_g, f_g, o_g = tf_gate_order
    return [i_g, f_g, c_g, o_g]

  def _cudnn_to_tf_weights(self, *cu_weights):
    r"""Stitching cudnn canonical weights to generate tf canonical weights."""
    # 8 tensors per layer: 4 input kernels (w_*) then 4 recurrent kernels
    # (r_*), one per gate.
    w_i, w_f, w_c, w_o, r_i, r_f, r_c, r_o = cu_weights
    # pylint: disable=invalid-name
    W_i = array_ops.concat([w_i, r_i], axis=1)
    W_f = array_ops.concat([w_f, r_f], axis=1)
    W_c = array_ops.concat([w_c, r_c], axis=1)
    W_o = array_ops.concat([w_o, r_o], axis=1)
    # pylint: enable=invalid-name
    # Cudnn LSTM weights are in ifco order, other tf LSTMs are in icfo order.
    reordered = self._cudnn_to_tf_gate_params(*[W_i, W_f, W_c, W_o])
    return (array_ops.transpose(array_ops.concat(reordered, axis=0)),)

  def _tf_to_cudnn_weights(self, layer, *tf_weights):
    r"""Reverse the operations in StitchWeights()."""
    input_size = self._input_size
    num_units = self._num_units
    # Layer 0 reads the model input; deeper layers read the previous layer's
    # output, which is doubled when the RNN is bidirectional.
    if layer == 0:
      input_weight_width = input_size
    else:
      input_weight_width = num_units
      if self._direction == CUDNN_RNN_BIDIRECTION:
        input_weight_width *= 2
    (tf_weight,) = tf_weights
    w = array_ops.transpose(tf_weight)
    # pylint: disable=invalid-name
    W_i, W_f, W_c, W_o = self._tf_to_cudnn_gate_params(*array_ops.split(
        w, 4, axis=0))
    # Split each fused gate kernel back into input- and recurrent-halves.
    w_i, r_i = array_ops.split(W_i, [input_weight_width, num_units], axis=1)
    w_c, r_c = array_ops.split(W_c, [input_weight_width, num_units], axis=1)
    w_f, r_f = array_ops.split(W_f, [input_weight_width, num_units], axis=1)
    w_o, r_o = array_ops.split(W_o, [input_weight_width, num_units], axis=1)
    return w_i, w_f, w_c, w_o, r_i, r_f, r_c, r_o
    # pylint: enable=invalid-name

  def _cudnn_to_tf_biases(self, *cu_biases):
    r"""Stitching cudnn canonical biases to generate tf canonical biases."""
    b_wi, b_wf, b_wc, b_wo, b_ri, b_rf, b_rc, b_ro = cu_biases
    # Save only the sum instead of individual biases. When recovering, return
    # two biases each with half the value. Since RNN does not regularize by
    # weight decay, it has no side effect in training or inference.
    # pylint: disable=invalid-name
    B_i = b_wi + b_ri
    B_f = b_wf + b_rf
    B_c = b_wc + b_rc
    B_o = b_wo + b_ro
    # pylint: enable=invalid-name
    reordered = self._cudnn_to_tf_gate_params(*[B_i, B_f, B_c, B_o])
    return (array_ops.concat(reordered, axis=0),)

  def _tf_to_cudnn_biases(self, *tf_biases):
    r"""Reverse the operations in StitchBiases()."""
    (tf_bias,) = tf_biases
    # pylint: disable=invalid-name
    B_i, B_f, B_c, B_o = self._tf_to_cudnn_gate_params(*array_ops.split(
        tf_bias, 4, axis=0))
    # pylint: enable=invalid-name
    # Each fused bias is split evenly between the input and recurrent side
    # (their sum is what matters; see _cudnn_to_tf_biases).
    # pylint: disable=unbalanced-tuple-unpacking
    b_wi, b_ri = (B_i * 0.5,) * 2
    b_wf, b_rf = (B_f * 0.5,) * 2
    b_wc, b_rc = (B_c * 0.5,) * 2
    b_wo, b_ro = (B_o * 0.5,) * 2
    # pylint: enable=unbalanced-tuple-unpacking
    # Return ifco order for Cudnn LSTM.
    return b_wi, b_wf, b_wc, b_wo, b_ri, b_rf, b_rc, b_ro

  def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,
                                                 tf_weights, tf_biases):
    # One fused kernel and one fused bias per (layer, direction).
    (w,) = self._cudnn_to_tf_weights(*cu_weights)
    (b,) = self._cudnn_to_tf_biases(*cu_biases)
    tf_weights.append(w)
    tf_biases.append(b)
class CudnnParamsFormatConverterGRU(CudnnParamsFormatConverter):
  """Helper class that converts between params of Cudnn and TF GRU."""

  _rnn_mode = CUDNN_GRU
  _num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER

  _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleGRUCell.__name__)

  def _cudnn_to_tf_weights(self, *cu_weights):
    r"""Stitching cudnn canonical weights to generate tf canonical weights."""
    # 6 tensors per layer: 3 input kernels (w_*) then 3 recurrent kernels
    # (r_*). The i/r kernels belong to the fused reset/update gates; the h
    # kernels belong to the candidate gate and stay separate (matching
    # CudnnCompatibleGRUCell's variable layout).
    w_i, w_r, w_h, r_i, r_r, r_h = cu_weights
    # pylint: disable=invalid-name
    W_i = array_ops.concat([w_i, r_i], axis=1)
    W_r = array_ops.concat([w_r, r_r], axis=1)
    # pylint: enable=invalid-name
    return (array_ops.transpose(array_ops.concat([W_i, W_r], axis=0)),
            array_ops.transpose(w_h), array_ops.transpose(r_h))

  def _tf_to_cudnn_weights(self, layer, *tf_weights):
    r"""Reverse the operations in StitchWeights()."""
    input_size = self._input_size
    num_units = self._num_units
    # Layer 0 reads the model input; deeper layers read the previous layer's
    # output, doubled when bidirectional.
    if layer == 0:
      input_weight_width = input_size
    else:
      input_weight_width = num_units
      if self._direction == CUDNN_RNN_BIDIRECTION:
        input_weight_width *= 2
    # pylint: disable=invalid-name
    W_ir, w_h, r_h = tf_weights
    W_ir = array_ops.transpose(W_ir)
    w_h = array_ops.transpose(w_h)
    r_h = array_ops.transpose(r_h)
    W_i, W_r = array_ops.split(W_ir, 2, axis=0)
    w_i, r_i = array_ops.split(W_i, [input_weight_width, num_units], axis=1)
    w_r, r_r = array_ops.split(W_r, [input_weight_width, num_units], axis=1)
    # pylint: enable=invalid-name
    return w_i, w_r, w_h, r_i, r_r, r_h

  def _cudnn_to_tf_biases(self, *biases):
    r"""Stitching cudnn canonical biases to generate tf canonical biases."""
    b_wi, b_wr, b_wh, b_ri, b_rr, b_rh = biases
    return (
        # Save only the sum instead of individual biases. When recovering,
        # return two biases each with half the value. Since RNN does not
        # regularize by weight decay, it has no side effect in training or
        # inference.
        array_ops.concat([b_wi, b_wr], axis=0) + array_ops.concat(
            [b_ri, b_rr], axis=0),
        # Candidate-gate biases must stay separate (b_Rh is a distinct term
        # in the Cudnn GRU formula).
        b_wh,
        b_rh)

  def _tf_to_cudnn_biases(self, *tf_biases):
    r"""Reverse the operations in StitchBiases()."""
    # b_ir is the summed bias of reset and update gate.
    b_ir, b_wh, b_rh = tf_biases
    bi, br = b_ir * 0.5, b_ir * 0.5
    b_wi, b_wr = array_ops.split(bi, 2, axis=0)
    b_ri, b_rr = array_ops.split(br, 2, axis=0)
    return b_wi, b_wr, b_wh, b_ri, b_rr, b_rh

  def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,
                                                 tf_weights, tf_biases):
    # Three kernels and three biases per (layer, direction).
    # pylint: disable=invalid-name
    W_ir, w_h, r_h = self._cudnn_to_tf_weights(*cu_weights)
    b_ir, b_wh, b_rh = self._cudnn_to_tf_biases(*cu_biases)
    # pylint: enable=invalid-name
    tf_weights.extend([W_ir, w_h, r_h])
    tf_biases.extend([b_ir, b_wh, b_rh])
class CudnnParamsFormatConverterBasic(CudnnParamsFormatConverterLSTM):
  """Helper class that converts between params of Cudnn and TF Relu/Tanh RNN."""

  def _cudnn_to_tf_weights(self, *cu_weights):
    r"""Stitching cudnn canonical weights to generate tf canonical weights."""
    # Two tensors per layer: the input kernel and the recurrent kernel.
    input_kernel, hidden_kernel = cu_weights
    merged = array_ops.concat([input_kernel, hidden_kernel], axis=1)
    return (array_ops.transpose(merged),)

  def _tf_to_cudnn_weights(self, layer, *tf_weights):
    r"""Reverse the operations in StitchWeights()."""
    # Layer 0 reads the model input; deeper layers read the previous layer's
    # output, which doubles in width when the RNN is bidirectional.
    if layer == 0:
      input_weight_width = self._input_size
    else:
      input_weight_width = self._num_units
      if self._direction == CUDNN_RNN_BIDIRECTION:
        input_weight_width *= 2
    (tf_weight,) = tf_weights
    merged = array_ops.transpose(tf_weight)
    input_kernel, hidden_kernel = array_ops.split(
        merged, [input_weight_width, self._num_units], axis=1)
    return input_kernel, hidden_kernel

  def _cudnn_to_tf_biases(self, *cu_biases):
    r"""Stitching cudnn canonical biases to generate tf canonical biases."""
    # Save only the sum instead of individual biases. When recovering, return
    # two biases each with half the value. Since RNN does not regularize by
    # weight decay, it has no side effect in training or inference.
    input_bias, hidden_bias = cu_biases
    return (input_bias + hidden_bias,)

  def _tf_to_cudnn_biases(self, *tf_biases):
    r"""Reverse the operations in StitchBiases()."""
    (tf_bias,) = tf_biases
    return tf_bias * 0.5, tf_bias * 0.5
class CudnnParamsFormatConverterTanh(CudnnParamsFormatConverterBasic):
  """Helper class that converts between params of Cudnn and TF Tanh RNN."""
  # Conversion logic is inherited from CudnnParamsFormatConverterBasic; only
  # the rnn mode and per-layer param count differ.
  _rnn_mode = CUDNN_RNN_TANH
  _num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
class CudnnParamsFormatConverterRelu(CudnnParamsFormatConverterBasic):
  """Helper class that converts between params of Cudnn and TF Relu RNN."""
  # Conversion logic is inherited from CudnnParamsFormatConverterBasic; only
  # the rnn mode and per-layer param count differ.
  _rnn_mode = CUDNN_RNN_RELU
  _num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
# TODO(yaozhang): make sure we only save the canonical version of params and
# don't save the platform-specific version to avoid potential race
# conditions where params is updated by both versions when being restored.
# Currently, checkpointing will function properly, despite that we save both
# versions, because Saver restores customized savables after Variables.
# However, it is good to not rely on this restoring order of Saver and to
# avoid unnecessary storage. Add a test to check only the canonical version is
# saved.
class CudnnOpaqueParamsSaveable(saver.BaseSaverBuilder.SaveableObject):
  """Abstract SaveableObject implementation handling Cudnn opaque params."""

  def __init__(self,
               opaque_params,
               num_layers,
               num_units,
               input_size,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION,
               scope=None,
               name="cudnn_rnn_saveable"):
    """Creates a CudnnOpaqueParamsSaveable object.

    CudnnOpaqueParamsSaveable is saveable/restorable in a checkpoint file
    and is used to save/restore the weights and biases parameters in a
    canonical format which is directly consumable by platform-independent tf
    RNN cells. Parameters are saved as tensors layer by layer with weight
    tensors followed by bias tensors, and forward direction followed by
    backward direction (if applicable). When restoring, a user could name
    param_variables as desired, and restore weight and bias tensors to these
    variables.

    For CudnnRNNRelu or CudnnRNNTanh, there are 2 tensors per weight and per
    bias for each layer: tensor 0 is applied to the input from the previous
    layer and tensor 1 to the recurrent input.

    For CudnnLSTM, there are 8 tensors per weight and per bias for each
    layer: tensor 0-3 are applied to the input from the previous layer and
    tensor 4-7 to the recurrent input. Tensor 0 and 4 are for the input gate;
    tensor 1 and 5 the forget gate; tensor 2 and 6 the new memory gate;
    tensor 3 and 7 the output gate.

    For CudnnGRU, there are 6 tensors per weight and per bias for each layer:
    tensor 0-2 are applied to the input from the previous layer and
    tensor 3-5 to the recurrent input. Tensor 0 and 3 are for the reset gate;
    tensor 1 and 4 the update gate; tensor 2 and 5 the new memory gate.

    Args:
      opaque_params: a variable, Cudnn RNN opaque params.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input, it could be different from the
        num_units.
      input_mode: indicate whether there is a linear projection between the
        input and the actual computation before the first layer. It could be
        'linear_input', 'skip_input' or 'auto_select'. 'linear_input' (default)
        always applies a linear projection of input onto RNN hidden state.
        (standard RNN behavior). 'skip_input' is only allowed when input_size ==
        num_units; 'auto_select' implies 'skip_input' when input_size ==
        num_units; otherwise, it implies 'linear_input'.
      direction: the direction model that the model operates. Could be either
        'unidirectional' or 'bidirectional'
      scope: string of VariableScope, the scope of equivalent subgraph
        consisting only platform-independent tf RNN cells.
      name: the name of the CudnnOpaqueParamsSaveable object.
    """
    # Define in subclasses.
    self._num_layers = num_layers
    self._input_size = input_size
    self._num_units = num_units
    self._input_mode = input_mode
    self._direction = direction
    if scope is not None:
      # Accept either a VariableScope object or a plain string.
      scope_name = scope.name if isinstance(scope, vs.VariableScope) else scope
      self._scope = scope_name or None
    else:
      self._scope = None

    self._variables = opaque_params
    self._num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
    # Defined in subclasses.
    self._format_converter = None

    # Unpack the opaque buffer into tf-canonical tensors; these (not the
    # opaque buffer) are what get written to the checkpoint.
    tf_weights, tf_biases = (
        self.format_converter.opaque_to_tf_canonical(self._variables))
    tf_weight_names, tf_bias_names = self._tf_canonical_names()
    # We currently don't use slice_spec. It might be useful in a distributed
    # setting where each parameter server node stores a slice of variable,
    # instead of having the master pull all slices and then save them.
    slice_spec = ""
    # `params` and `prefixed_param_names` are zipped below, so both keep the
    # same weights-then-biases ordering.
    params = tf_weights + tf_biases
    self._weight_names = tf_weight_names
    self._bias_names = tf_bias_names
    self._param_names = tf_weight_names + tf_bias_names
    prefixed_param_names = tf_weight_names + tf_bias_names
    if self._scope:
      prefixed_param_names = [
          "%s/%s" % (self._scope, pn) for pn in prefixed_param_names
      ]
    specs = [
        saver.BaseSaverBuilder.SaveSpec(param, slice_spec, param_name)
        for param, param_name in zip(params, prefixed_param_names)
    ]
    super(CudnnOpaqueParamsSaveable, self).__init__(
        array_ops.identity(self._variables), specs, name)

  @property
  def format_converter(self):
    # Built lazily so subclasses only need to set `_format_converter_cls`.
    if self._format_converter is None:
      self._format_converter = self._format_converter_cls(
          self._num_layers, self._num_units, self._input_size, self._input_mode,
          self._direction)
    return self._format_converter

  def restore(self, restored_tensors, restored_shapes):
    """Repacks restored tf-canonical tensors into the opaque buffer."""
    opaque_params = self.format_converter.tf_canonical_to_opaque(
        restored_tensors)
    # validate_shape=False: presumably because the opaque buffer's shape is
    # not statically known -- NOTE(review): confirm.
    return state_ops.assign(
        self._variables, opaque_params, validate_shape=False)

  def _trackable_save(self, save_buffer):
    """Fills `save_buffer` with canonical tensors, keyed by param name."""
    weights, biases = self.format_converter.opaque_to_tf_canonical(
        self._variables)
    for name, tensor in zip(self._param_names, weights + biases):
      save_buffer[name] = array_ops.identity(tensor)

  def _trackable_restore(self, restore_buffer):
    """Restores from a `restore_buffer` produced by `_trackable_save`."""
    tensors = [
        array_ops.identity(restore_buffer[name]) for name in self._param_names
    ]
    return self.restore(
        restored_tensors=tensors,
        restored_shapes=None  # Unused
    )

  def _add_trackable_dependencies(self, trackable, dtype):
    """Add canonical weight dependencies to `trackable`.

    When saving or restoring, converts to or from the opaque buffer
    format. Weights are saved and loaded in the configuration expected by
    cuDNN-compatible cells.

    Args:
      trackable: An object inheriting from `Trackable` to add
        dependencies too (typically the cuDNN `Layer`).
      dtype: The dtype for the canonical parameter Tensors.
    """
    split_dependencies = split_dependency.split_dependency(
        component_names=self._param_names,
        component_dtypes=(dtype,) * len(self._param_names),
        fill_save_buffer_fn=self._trackable_save,
        consume_restore_buffer_fn=self._trackable_restore)
    self._trackable_track_params(trackable, split_dependencies)

  def _trackable_track_params(self, trackable, params):
    """Tracks parameters in a canonical configuration."""
    return  # NotImplementedError raised by the Layer.

  def _tf_canonical_names(self):
    """Builds (weight names, bias names) matching the canonical tensor order."""
    tf_weights_names, tf_biases_names = [], []
    for i in range(self._num_layers):
      if self._direction == CUDNN_RNN_UNIDIRECTION:
        prefix = self._tf_canonical_name_prefix(i)
        self._tf_canonical_names_single_layer(prefix, tf_weights_names,
                                              tf_biases_names)
      else:
        # Forward-direction names precede backward-direction names, matching
        # the converter's output order.
        fwd_prefix = self._tf_canonical_name_prefix(i, is_fwd=True)
        bak_prefix = self._tf_canonical_name_prefix(i, is_fwd=False)
        self._tf_canonical_names_single_layer(fwd_prefix, tf_weights_names,
                                              tf_biases_names)
        self._tf_canonical_names_single_layer(bak_prefix, tf_weights_names,
                                              tf_biases_names)
    return tf_weights_names, tf_biases_names

  def _tf_canonical_name_prefix(self, layer, is_fwd=True):
    """Variable-scope prefix of the equivalent platform-independent subgraph."""
    if self._direction == CUDNN_RNN_UNIDIRECTION:
      return "rnn/multi_rnn_cell/cell_%d/%s" % (layer, self._rnn_cell_name)
    else:
      if is_fwd:
        return ("stack_bidirectional_rnn/cell_%d/bidirectional_rnn/fw/%s" %
                (layer, self._rnn_cell_name))
      else:
        return ("stack_bidirectional_rnn/cell_%d/bidirectional_rnn/bw/%s" %
                (layer, self._rnn_cell_name))

  def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,
                                       tf_biases_names):
    """Appends this cell type's per-layer param names; defined in subclasses."""
    raise NotImplementedError("Abstract method")
class CudnnLSTMSaveable(CudnnOpaqueParamsSaveable):
  """SaveableObject implementation handling Cudnn LSTM opaque params."""

  _format_converter_cls = CudnnParamsFormatConverterLSTM
  _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleLSTMCell.__name__)

  def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,
                                       tf_bias_names):
    # One fused kernel/bias pair per (layer, direction).
    tf_weights_names.append(prefix + "/kernel")
    tf_bias_names.append(prefix + "/bias")

  def _trackable_track_params(self, trackable, params):
    """Track parameters for compatibility with CudnnCompatibleLSTMCell."""
    biases = []
    weights = []
    for name in self._weight_names:
      weights.append(params[name])
    for name in self._bias_names:
      biases.append(params[name])
    assert len(params) == len(weights) + len(biases)
    if len(weights) == 1 and len(biases) == 1:
      # For single-layer cells, allow substituting a cell with no MultiRNNCell
      # wrapping.
      kernel, = weights  # pylint: disable=unbalanced-tuple-unpacking
      bias, = biases  # pylint: disable=unbalanced-tuple-unpacking
      trackable._track_trackable(kernel, name="kernel")  # pylint: disable=protected-access
      trackable._track_trackable(bias, name="bias")  # pylint: disable=protected-access
    # Intentionally no early return above: the per-cell "cell-%d" dependencies
    # below are always tracked as well, so MultiRNNCell-style checkpoints line
    # up layer by layer.
    assert len(biases) == len(weights)
    for cell_index, (bias, kernel) in enumerate(zip(biases, weights)):
      cell = trackable_lib.AutoTrackable()
      trackable._track_trackable(cell, name="cell-%d" % cell_index)  # pylint: disable=protected-access
      cell.bias = bias
      cell.kernel = kernel
class CudnnGRUSaveable(CudnnOpaqueParamsSaveable):
  """SaveableObject implementation handling Cudnn GRU opaque params."""

  _format_converter_cls = CudnnParamsFormatConverterGRU
  _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleGRUCell.__name__)

  def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,
                                       tf_bias_names):
    # A GRU layer is split across gate, candidate-input-projection and
    # candidate-hidden-projection variables, each with a kernel and a bias.
    tf_weights_names.extend([
        prefix + "/gates/kernel",
        prefix + "/candidate/input_projection/kernel",
        prefix + "/candidate/hidden_projection/kernel",
    ])
    tf_bias_names.extend([
        prefix + "/gates/bias",
        prefix + "/candidate/input_projection/bias",
        prefix + "/candidate/hidden_projection/bias",
    ])
class CudnnRNNTanhSaveable(CudnnLSTMSaveable):
  """SaveableObject implementation handling Cudnn RNN-tanh opaque params."""
  # Reuses CudnnLSTMSaveable's one-kernel/one-bias-per-layer naming; only the
  # format converter and the compatible TF cell name differ.
  _format_converter_cls = CudnnParamsFormatConverterTanh
  _rnn_cell_name = base_layer.to_snake_case(rnn_cell_impl.BasicRNNCell.__name__)
class CudnnRNNReluSaveable(CudnnLSTMSaveable):
  """SaveableObject implementation handling Cudnn RNN-relu opaque params."""
  # Reuses CudnnLSTMSaveable's one-kernel/one-bias-per-layer naming; only the
  # format converter and the compatible TF cell name differ.
  _format_converter_cls = CudnnParamsFormatConverterRelu
  _rnn_cell_name = base_layer.to_snake_case(rnn_cell_impl.BasicRNNCell.__name__)
_cudnn_rnn_common_doc_string = """
Cudnn RNN has an opaque parameter buffer that can be used for inference and
training. But it is possible that the layout of the parameter buffers
changes between generations. So it is highly recommended to use
CudnnOpaqueParamsSaveable to save and restore weights and biases in a
canonical format.
This is a typical use case:
* The user creates a CudnnRNN model.
* The user query that parameter buffer size.
* The user creates a variable of that size that serves as the parameter
buffers.
* The user either initialize the parameter buffer, or load the canonical
weights into the parameter buffer.
* The user calls the model with the parameter buffer for inference, or
training.
* If training, the user creates a Saver object.
* If training, the user creates a CudnnOpaqueParamsSaveable object from the
parameter buffer for it to be later saved in the canonical format. When
creating a CudnnOpaqueParamsSaveable object, a name could be provided,
which is useful in distinguishing the names of multiple
CudnnOpaqueParamsSaveable objects (e.g. for an encoder-decoder model).
* Once a while, the user saves the parameter buffer into model checkpoints
with Saver.save().
* When restoring, the user creates a CudnnOpaqueParamsSaveable object and
uses Saver.restore() to restore the parameter buffer from the canonical
format to a user-defined format, as well as to restore other savable
objects in the checkpoint file.
"""
def _check_rnn_mode(rnn_mode):
  """Raises ValueError if `rnn_mode` is not a supported Cudnn RNN mode."""
  valid_modes = (CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_TANH, CUDNN_RNN_RELU)
  if rnn_mode in valid_modes:
    return
  raise ValueError("Invalid rnn_mode: %s, expect one of (%s, %s, %s, %s)" %
                   (rnn_mode, CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_TANH,
                    CUDNN_RNN_RELU))
def _get_seed(seed):
  """Splits `seed` into an op-level (seed, seed2) pair, defaulting to (0, 0).

  Args:
    seed: the graph-level seed value (may be None).

  Returns:
    A (seed, seed2) pair of ints; (0, 0) when both halves are unset.
  """
  op_seed, op_seed2 = random_seed.get_seed(seed)
  if op_seed is None and op_seed2 is None:
    op_seed, op_seed2 = 0, 0
  return op_seed, op_seed2
def check_direction(direction):
  """Raises ValueError unless `direction` is a valid Cudnn RNN direction."""
  if direction in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):
    return
  raise ValueError("Invalid direction: %s, expecting %s or %s" %
                   (direction, CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION))
def check_input_mode(input_mode):
  """Raises ValueError unless `input_mode` is a valid Cudnn RNN input mode."""
  if input_mode in (CUDNN_INPUT_LINEAR_MODE, CUDNN_INPUT_SKIP_MODE,
                    CUDNN_INPUT_AUTO_MODE):
    return
  raise ValueError("Invalid input_mode: %s, expect one of (%s, %s, %s)" %
                   (input_mode, CUDNN_INPUT_LINEAR_MODE,
                    CUDNN_INPUT_SKIP_MODE, CUDNN_INPUT_AUTO_MODE))
def _get_num_params(rnn_mode, num_layers, direction):
  """Return num params for given Cudnn config.

  Args:
    rnn_mode: one of 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
    num_layers: the number of layers for the RNN model.
    direction: 'unidirectional' or 'bidirectional'.

  Returns:
    The total number of parameter sets (weight + bias pairs).

  Raises:
    ValueError: if rnn_mode is invalid.
  """
  params_per_layer_by_mode = {
      CUDNN_LSTM: CUDNN_LSTM_PARAMS_PER_LAYER,
      CUDNN_GRU: CUDNN_GRU_PARAMS_PER_LAYER,
      CUDNN_RNN_RELU: CUDNN_RNN_RELU_PARAMS_PER_LAYER,
      CUDNN_RNN_TANH: CUDNN_RNN_TANH_PARAMS_PER_LAYER,
  }
  if rnn_mode not in params_per_layer_by_mode:
    raise ValueError("Invalid \'rnn_mode\': %s" % rnn_mode)
  num_params = num_layers * params_per_layer_by_mode[rnn_mode]
  # Bidirectional models keep an independent parameter set per direction.
  if direction != CUDNN_RNN_UNIDIRECTION:
    num_params *= 2
  return num_params
def _cudnn_rnn(inputs, input_h, input_c, params, is_training, rnn_mode,
               sequence_lengths=None, input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION, dropout=0., seed=0,
               name=None):
  """Dispatches to the appropriate low-level Cudnn RNN op.

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    input_c: the initial hidden state for c; only relevant for LSTM. A Tensor
      of the same shape as input_h.
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    rnn_mode: one of ('lstm', 'gru', 'rnn_relu', 'rnn_tanh').
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, every sequence in the batch is assumed to
      have the length inferred from `inputs`.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'linear_input'
      (default) always applies a linear projection (standard RNN behavior);
      'skip_input' is only allowed when input_size == num_units;
      'auto_select' implies 'skip_input' when input_size == num_units,
      otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    A (outputs, output_h, output_c) tuple.
  """
  _check_rnn_mode(rnn_mode)
  check_direction(direction)
  check_input_mode(input_mode)
  seed, seed2 = random_seed.get_seed(seed)
  # TODO(jamesqin): switch default value to "1" on May 25th 2018, and get rid
  # of V1 ops.
  use_cudnn_v2 = os.environ.get("TF_CUDNN_RNN_USE_V2", "0")
  args = {
      "input": inputs,
      "input_h": input_h,
      "input_c": input_c,
      "params": params,
      "is_training": is_training,
      "rnn_mode": rnn_mode,
      "input_mode": input_mode,
      "direction": direction,
      "dropout": dropout,
      "seed": seed,
      "seed2": seed2,
      "name": name,
  }
  if sequence_lengths is not None:
    # Variable-length batches are only supported by the V3 op.
    args["sequence_lengths"] = sequence_lengths
    outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(**args)
  elif use_cudnn_v2 == "1":
    outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args)
  else:
    outputs, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn(**args)
  return outputs, output_h, output_c
def cudnn_lstm(inputs, input_h, input_c, params, is_training,
               sequence_lengths=None, input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION, dropout=0., seed=0,
               name=None):
  """Runs an LSTM forward pass using the Cudnn RNN op.

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    input_c: the initial hidden state for c, a Tensor of the same shape as
      input_h.
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, every sequence in the batch is assumed to
      have the length inferred from `inputs`.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    A (outputs, output_h, output_c) tuple.
  """
  return _cudnn_rnn(inputs, input_h, input_c, params, is_training, CUDNN_LSTM,
                    sequence_lengths=sequence_lengths, input_mode=input_mode,
                    direction=direction, dropout=dropout, seed=seed, name=name)
def _cudnn_rnn_no_input_c(inputs, input_h, params, is_training, rnn_mode,
                          sequence_lengths=None,
                          input_mode=CUDNN_INPUT_LINEAR_MODE,
                          direction=CUDNN_RNN_UNIDIRECTION, dropout=0.,
                          seed=0, name=None):
  """Runs a Cudnn RNN that carries no cell state (input_c).

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    rnn_mode: one of ('lstm', 'gru', 'rnn_relu', 'rnn_tanh').
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, every sequence in the batch is assumed to
      have the length inferred from `inputs`.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    A (outputs, output_h) pair.
  """
  # The underlying op always takes an input_c argument; pass an empty tensor
  # of the right dtype and discard the returned output_c.
  input_c = array_ops.constant([], dtype=input_h.dtype)
  outputs, output_h, _ = _cudnn_rnn(
      inputs, input_h, input_c, params, is_training, rnn_mode,
      sequence_lengths=sequence_lengths, input_mode=input_mode,
      direction=direction, dropout=dropout, seed=seed, name=name)
  return outputs, output_h
def cudnn_gru(inputs, input_h, params, is_training, sequence_lengths=None,
              input_mode=CUDNN_INPUT_LINEAR_MODE,
              direction=CUDNN_RNN_UNIDIRECTION, dropout=0., seed=0,
              name=None):
  """Runs a GRU forward pass using the Cudnn RNN op.

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, every sequence in the batch is assumed to
      have the length inferred from `inputs`.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    A (outputs, output_h) pair.
  """
  return _cudnn_rnn_no_input_c(
      inputs, input_h, params, is_training, CUDNN_GRU,
      sequence_lengths=sequence_lengths, input_mode=input_mode,
      direction=direction, dropout=dropout, seed=seed, name=name)
def cudnn_rnn_relu(inputs, input_h, params, is_training,
                   input_mode=CUDNN_INPUT_LINEAR_MODE,
                   direction=CUDNN_RNN_UNIDIRECTION, dropout=0., seed=0,
                   sequence_lengths=None, name=None):
  """Runs a ReLU RNN forward pass using the Cudnn RNN op.

  NOTE: unlike its siblings, this wrapper keeps `sequence_lengths` after
  `seed` in the signature, preserved for positional-caller compatibility.

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, all sequences are assumed to have the same
      length.
    name: name of the operation.

  Returns:
    A (outputs, output_h) pair.
  """
  return _cudnn_rnn_no_input_c(
      inputs, input_h, params, is_training, CUDNN_RNN_RELU,
      sequence_lengths=sequence_lengths, input_mode=input_mode,
      direction=direction, dropout=dropout, seed=seed, name=name)
def cudnn_rnn_tanh(inputs, input_h, params, is_training,
                   sequence_lengths=None, input_mode=CUDNN_INPUT_LINEAR_MODE,
                   direction=CUDNN_RNN_UNIDIRECTION, dropout=0., seed=0,
                   name=None):
  """Runs a tanh RNN forward pass using the Cudnn RNN op.

  Args:
    inputs: the input sequence, a Tensor of shape [?, batch_size, input_size].
    input_h: the initial hidden state for h, a Tensor of shape [num_layers,
      batch_size, num_units].
    params: the opaque parameter buffer created for this model.
    is_training: whether this operation is used for training or inference.
    sequence_lengths: optional int32 array of per-example sequence lengths,
      sized batch_size. If None, every sequence in the batch is assumed to
      have the length inferred from `inputs`.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    A (outputs, output_h) pair.
  """
  return _cudnn_rnn_no_input_c(
      inputs, input_h, params, is_training, CUDNN_RNN_TANH,
      sequence_lengths=sequence_lengths, input_mode=input_mode,
      direction=direction, dropout=dropout, seed=seed, name=name)
def cudnn_rnn_opaque_params_to_canonical(
    rnn_mode, num_layers, num_units, input_size, params,
    input_mode=CUDNN_INPUT_LINEAR_MODE, direction=CUDNN_RNN_UNIDIRECTION,
    dropout=0, seed=0, name=None):
  """Convert cudnn opaque params to the canonical weight and bias lists.

  Args:
    rnn_mode: one of 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
    num_layers: the number of layers for the RNN model.
    num_units: the number of units within the RNN model.
    input_size: the size of the input; may differ from num_units.
    params: opaque cudnn params var.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    weights list and bias list.

  Raises:
    ValueError: if rnn_mode, direction or input_mode is invalid.
  """
  _check_rnn_mode(rnn_mode)
  check_direction(direction)
  check_input_mode(input_mode)
  num_params = _get_num_params(rnn_mode, num_layers, direction)
  seed, seed2 = random_seed.get_seed(seed)
  weights, biases = gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical(
      rnn_mode=rnn_mode, num_layers=num_layers, num_units=num_units,
      input_size=input_size, params=params, input_mode=input_mode,
      direction=direction, dropout=dropout, seed=seed, seed2=seed2,
      num_params=num_params, name=name)
  return weights, biases
def cudnn_rnn_canonical_to_opaque_params(
    rnn_mode, num_layers, num_units, input_size, weights, biases,
    input_mode=CUDNN_INPUT_LINEAR_MODE, direction=CUDNN_RNN_UNIDIRECTION,
    dropout=0, seed=0, name=None):
  """Converts params from the canonical format to a specific format of cuDNN.

  Args:
    rnn_mode: one of 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
    num_layers: the number of layers for the RNN model.
    num_units: the number of units within the RNN model.
    input_size: the size of the input; may differ from num_units.
    weights: a Tensor for weight parameters.
    biases: a Tensor for bias parameters.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    an opaque Cudnn param.

  Raises:
    ValueError: if rnn_mode, direction or input_mode is invalid.
  """
  _check_rnn_mode(rnn_mode)
  check_direction(direction)
  check_input_mode(input_mode)
  seed, seed2 = random_seed.get_seed(seed)
  return gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params(
      rnn_mode=rnn_mode, num_layers=num_layers, num_units=num_units,
      input_size=input_size, weights=weights, biases=biases,
      input_mode=input_mode, direction=direction, dropout=dropout,
      seed=seed, seed2=seed2, name=name)
def cudnn_rnn_opaque_params_size(
    rnn_mode, num_layers, num_units, input_size,
    input_mode=CUDNN_INPUT_LINEAR_MODE, direction=CUDNN_RNN_UNIDIRECTION,
    dtype=dtypes.float32, dropout=0, seed=0, name=None):
  """Returns opaque params size for specific Cudnn config.

  Args:
    rnn_mode: one of 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
    num_layers: the number of layers for the RNN model.
    num_units: the number of units within the RNN model.
    input_size: the size of the input; may differ from num_units.
    input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls the
      projection between the input and the first layer. 'skip_input' is only
      allowed when input_size == num_units; 'auto_select' implies
      'skip_input' when input_size == num_units, otherwise 'linear_input'.
    direction: 'unidirectional' or 'bidirectional'.
    dtype: one of tf.float32 or tf.float64.
    dropout: dropout probability; 0 disables dropout.
    seed: the op seed used for initializing dropout, see
      `tf.set_random_seed` for behavior.
    name: name of the operation.

  Returns:
    an int, size of Cudnn opaque params.

  Raises:
    ValueError: if rnn_mode, direction or input_mode is invalid.
  """
  _check_rnn_mode(rnn_mode)
  check_direction(direction)
  check_input_mode(input_mode)
  seed, seed2 = random_seed.get_seed(seed)
  params_size_ts = gen_cudnn_rnn_ops.cudnn_rnn_params_size(
      rnn_mode=rnn_mode, num_layers=num_layers, num_units=num_units,
      input_size=input_size, T=dtype, S=dtypes.int32, dropout=dropout,
      seed=seed, seed2=seed2, input_mode=input_mode, direction=direction,
      name=name)
  # The op returns a list; the single element is the size tensor.
  return params_size_ts[0]
class _CudnnRNN(object):
  """Creates an RNN model using the underlying Cudnn implementation.

  Note that self._NUM_PARAMS_PER_LAYER is the number of parameter sets of
  weight and bias per layer. It needs to be defined in subclasses.
  """
  __doc__ += _cudnn_rnn_common_doc_string

  # TODO(jamesqin): support float16 CuDNN RNN
  def __init__(self,
               rnn_mode,
               num_layers,
               num_units,
               input_size,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION,
               dtype=dtypes.float32,
               dropout=0.,
               seed=0):
    """Creates a CudnnRNN model from model spec.

    Args:
      rnn_mode: one of 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input; may differ from num_units.
      input_mode: 'linear_input', 'skip_input' or 'auto_select'; controls
        the projection between the input and the first layer. 'skip_input'
        is only allowed when input_size == num_units; 'auto_select' implies
        'skip_input' when input_size == num_units, otherwise 'linear_input'.
      direction: 'unidirectional' or 'bidirectional'.
      dtype: dtype of params, tf.float32 or tf.float64.
      dropout: dropout probability; 0 disables dropout.
      seed: the op seed used for initializing dropout, see
        `tf.set_random_seed` for behavior.

    Raises:
      ValueError: if direction is invalid.
    """
    # NOTE: these private attribute names are read by subclasses and the
    # saveable classes in this file; do not rename.
    self._rnn_mode = rnn_mode
    self._num_layers = num_layers
    self._num_units = num_units
    self._input_size = input_size
    self._input_mode = input_mode
    self._direction = direction
    self._dtype = dtype
    self._dropout = dropout
    self._seed = seed

  @property
  def input_mode(self):
    return self._input_mode

  @property
  def input_size(self):
    return self._input_size

  @property
  def num_units(self):
    return self._num_units

  @property
  def num_layers(self):
    return self._num_layers

  @property
  def rnn_mode(self):
    return self._rnn_mode

  @property
  def direction(self):
    return self._direction

  def params_size(self):
    """Calculates the size of the opaque parameter buffer for this model.

    Returns:
      The calculated parameter buffer size.
    """
    return cudnn_rnn_opaque_params_size(
        rnn_mode=self._rnn_mode,
        num_layers=self._num_layers,
        num_units=self._num_units,
        input_size=self._input_size,
        input_mode=self._input_mode,
        direction=self._direction,
        dtype=self._dtype,
        dropout=self._dropout,
        seed=self._seed)

  def __call__(self, input_data, input_h, input_c, params, is_training=True,
               sequence_lengths=None):
    """Runs the forward step for the RNN model.

    Args:
      input_data: the input sequence, a Tensor of shape [?, batch_size,
        input_size].
      input_h: the initial hidden state for h, a Tensor of shape
        [num_layers, batch_size, num_units].
      input_c: the initial hidden state for c; only relevant for LSTM. A
        Tensor of the same shape as input_h.
      params: the parameter buffer created for this model.
      is_training: whether this operation is used for training or inference.
      sequence_lengths: optional int32 array of per-example sequence
        lengths, sized batch_size. If None, every sequence in the batch is
        assumed to have the length inferred from inputs.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
      output_c: the final state for c; only relevant for LSTM.
    """
    return _cudnn_rnn(
        input_data,
        input_h,
        input_c,
        params,
        is_training,
        self._rnn_mode,
        sequence_lengths=sequence_lengths,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed)

  def params_to_canonical(self, params):
    """Converts params from the cuDNN-specific to the canonical format.

    Args:
      params: a Variable holding the opaque weight and bias buffer.

    Returns:
      The canonical (weights, biases) lists.
    """
    return cudnn_rnn_opaque_params_to_canonical(
        rnn_mode=self._rnn_mode,
        num_layers=self._num_layers,
        num_units=self._num_units,
        input_size=self._input_size,
        params=params,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed)

  def canonical_to_params(self, weights, biases):
    """Converts params from the canonical to the cuDNN-specific format.

    Args:
      weights: a Tensor for weight parameters.
      biases: a Tensor for bias parameters.

    Returns:
      The opaque cuDNN parameter buffer.
    """
    return cudnn_rnn_canonical_to_opaque_params(
        rnn_mode=self._rnn_mode,
        num_layers=self._num_layers,
        num_units=self._num_units,
        input_size=self._input_size,
        weights=weights,
        biases=biases,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed)
class CudnnLSTM(_CudnnRNN):
  """Cudnn implementation of the LSTM model."""
  __doc__ += _cudnn_rnn_common_doc_string
  # 4 sets of weight and bias parameters for the recurrent input, and 4 for the
  # previous layer input.
  _NUM_PARAMS_PER_LAYER = CUDNN_LSTM_PARAMS_PER_LAYER

  def __init__(self, num_layers, num_units, input_size,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION, dtype=dtypes.float32,
               dropout=0., seed=0):
    """Creates a Cudnn LSTM model from model spec.

    Args:
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input; may differ from num_units.
      input_mode: 'skip_input', 'linear_input' or 'auto_select'; controls
        the projection between the input and the first layer. 'skip_input'
        is only allowed when input_size == num_units; 'auto_select' implies
        'skip_input' when input_size == num_units, otherwise 'linear_input'.
      direction: 'unidirectional' or 'bidirectional'.
      dtype: dtype of params, tf.float32 or tf.float64.
      dropout: dropout probability; 0 disables dropout.
      seed: the seed used for initializing dropout.
    """
    super(CudnnLSTM, self).__init__(
        CUDNN_LSTM, num_layers, num_units, input_size,
        input_mode=input_mode, direction=direction, dtype=dtype,
        dropout=dropout, seed=seed)

  def __call__(self, input_data, input_h, input_c, params,
               sequence_lengths=None, is_training=True):
    """Runs the forward step for the Cudnn LSTM model.

    Args:
      input_data: the input sequence, a Tensor of shape [?, batch_size,
        input_size].
      input_h: the initial hidden state for h, a Tensor of shape
        [num_layers, batch_size, num_units].
      input_c: the initial hidden state for c, a Tensor of the same shape as
        input_h.
      params: the parameter buffer created for this model.
      sequence_lengths: optional int32 array of per-example sequence
        lengths, sized batch_size. If None, every sequence in the batch is
        assumed to have the length inferred from inputs.
      is_training: whether this operation is used for training or inference.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
      output_c: the final state for c.
    """
    return super(CudnnLSTM, self).__call__(
        input_data, input_h, input_c, params,
        sequence_lengths=sequence_lengths, is_training=is_training)
class _CudnnRNNNoInputC(_CudnnRNN):
  """Simple CudnnRNN models without input_c (cell state)."""
  __doc__ += _cudnn_rnn_common_doc_string

  def __init__(self, num_layers, num_units, input_size,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION, dtype=dtypes.float32,
               dropout=0., seed=0):
    """Creates a Cudnn RNN model that carries no hidden-state C.

    The concrete mode (gru/rnn_tanh/rnn_relu) comes from the subclass's
    `_rnn_mode` class attribute.

    Args:
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_size: the size of the input; may differ from num_units.
      input_mode: 'skip_input', 'linear_input' or 'auto_select'; controls
        the projection between the input and the first layer. 'skip_input'
        is only allowed when input_size == num_units; 'auto_select' implies
        'skip_input' when input_size == num_units, otherwise 'linear_input'.
      direction: 'unidirectional' or 'bidirectional'.
      dtype: dtype of params, tf.float32 or tf.float64.
      dropout: dropout probability; 0 disables dropout.
      seed: the seed used for initializing dropout.

    Raises:
      ValueError: if direction is not 'unidirectional' or 'bidirectional'.
    """
    if direction not in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):
      raise ValueError("Invalid direction: %s" % direction)
    super(_CudnnRNNNoInputC, self).__init__(
        self._rnn_mode, num_layers, num_units, input_size,
        input_mode=input_mode, direction=direction, dtype=dtype,
        dropout=dropout, seed=seed)

  def __call__(self, input_data, input_h, params, sequence_lengths=None,
               is_training=True):
    """Runs the forward step for this Cudnn RNN model (no cell state).

    Args:
      input_data: the input sequence, a Tensor of shape [?, batch_size,
        input_size].
      input_h: the initial hidden state for h, a Tensor of shape
        [num_layers, batch_size, num_units].
      params: the parameter buffer created for this model.
      sequence_lengths: optional int32 array of per-example sequence
        lengths, sized batch_size. If None, every sequence in the batch is
        assumed to have the length inferred from inputs.
      is_training: whether this operation is used for training or inference.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
    """
    return _cudnn_rnn_no_input_c(
        input_data, input_h, params, is_training, self._rnn_mode,
        sequence_lengths=sequence_lengths, input_mode=self._input_mode,
        direction=self._direction, dropout=self._dropout, seed=self._seed)
class CudnnGRU(_CudnnRNNNoInputC):
  """Cudnn implementation of the GRU model."""
  __doc__ += _cudnn_rnn_common_doc_string
  # Mode consumed by _CudnnRNNNoInputC.__init__ when calling the base class.
  _rnn_mode = CUDNN_GRU
  # 3 sets of weight and bias parameters for the recurrent input, and 3 for the
  # previous layer input.
  _NUM_PARAMS_PER_LAYER = CUDNN_GRU_PARAMS_PER_LAYER
class CudnnRNNTanh(_CudnnRNNNoInputC):
  """Cudnn implementation of the RNN-tanh model."""
  __doc__ += _cudnn_rnn_common_doc_string
  # Mode consumed by _CudnnRNNNoInputC.__init__ when calling the base class.
  _rnn_mode = CUDNN_RNN_TANH
  # 1 set of weight and bias parameters for the recurrent input, and 1 for the
  # previous layer input.
  _NUM_PARAMS_PER_LAYER = CUDNN_RNN_TANH_PARAMS_PER_LAYER
class CudnnRNNRelu(_CudnnRNNNoInputC):
  """Cudnn implementation of the RNN-relu model."""
  # Append the shared usage notes to the class docstring.
  __doc__ = __doc__ + _cudnn_rnn_common_doc_string
  _rnn_mode = CUDNN_RNN_RELU
  # A vanilla relu layer carries 1 weight/bias parameter set for the
  # recurrent input plus 1 for the input from the previous layer.
  _NUM_PARAMS_PER_LAYER = CUDNN_RNN_RELU_PARAMS_PER_LAYER
| {
"content_hash": "5639760ff0fa56119768313eae0147ea",
"timestamp": "",
"source": "github",
"line_count": 1796,
"max_line_length": 103,
"avg_line_length": 40.99777282850779,
"alnum_prop": 0.6313423511516731,
"repo_name": "ageron/tensorflow",
"id": "7d848e2ec2d99cd2a78ff3e813207c0cd5bb97cf",
"size": "74321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import operator as op
from functools import reduce
from typing import List, Tuple
import numpy as np
import scipy.sparse as sp
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_op as lo
import cvxpy.lin_ops.lin_utils as lu
import cvxpy.utilities as u
from cvxpy.atoms.affine.add_expr import AddExpression
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.affine.conj import conj
from cvxpy.atoms.affine.reshape import deep_flatten
from cvxpy.atoms.affine.sum import sum as cvxpy_sum
from cvxpy.constraints.constraint import Constraint
from cvxpy.error import DCPError
from cvxpy.expressions.constants.parameter import (is_param_affine,
is_param_free,)
class BinaryOperator(AffAtom):
    """Common base class for binary-operator expressions (all except addition)."""

    OP_NAME = 'BINARY_OP'

    def __init__(self, lh_exp, rh_exp) -> None:
        super(BinaryOperator, self).__init__(lh_exp, rh_exp)

    def name(self):
        """Render the expression as a string, parenthesizing arguments whose
        operators bind more loosely than this one."""
        rendered = [
            '(' + arg.name() + ')'
            if isinstance(arg, (AddExpression, DivExpression))
            else arg.name()
            for arg in self.args
        ]
        return rendered[0] + ' ' + self.OP_NAME + ' ' + rendered[1]

    def numeric(self, values):
        """Apply the operator to the numeric argument values, left to right."""
        result = values[0]
        for value in values[1:]:
            result = self.OP_FUNC(result, value)
        return result

    def sign_from_args(self) -> Tuple[bool, bool]:
        """Default to the sign rules for multiplication."""
        return u.sign.mul_sign(self.args[0], self.args[1])

    def is_imag(self) -> bool:
        """True when exactly one argument is imaginary and the other real."""
        lhs, rhs = self.args
        return (lhs.is_imag() and rhs.is_real()) or \
            (lhs.is_real() and rhs.is_imag())

    def is_complex(self) -> bool:
        """True when some argument is complex-valued, unless both arguments
        are purely imaginary (then the result is real)."""
        lhs, rhs = self.args
        has_complex_arg = lhs.is_complex() or rhs.is_complex()
        both_imag = lhs.is_imag() and rhs.is_imag()
        return has_complex_arg and not both_imag
def matmul(lh_exp, rh_exp) -> "MulExpression":
    """Matrix-multiply two expressions.

    Parameters
    ----------
    lh_exp : Expression
        The left-hand operand.
    rh_exp : Expression
        The right-hand operand.

    Returns
    -------
    MulExpression
        The matrix product of the two expressions.
    """
    return MulExpression(lh_exp, rh_exp)
class MulExpression(BinaryOperator):
    """Matrix multiplication.

    The semantics of multiplication are exactly as those of NumPy's
    matmul function, except here multiplication by a scalar is permitted.
    MulExpression objects can be created by using the '*' operator of
    the Expression class.

    Parameters
    ----------
    lh_exp : Expression
        The left-hand side of the multiplication.
    rh_exp : Expression
        The right-hand side of the multiplication.
    """
    OP_NAME = "@"
    OP_FUNC = op.mul

    def numeric(self, values):
        """Matrix multiplication.
        """
        # np.matmul supports neither scalar nor sparse operands, so fall
        # back to the * operator in those cases.
        if self.args[0].shape == () or self.args[1].shape == () or \
                intf.is_sparse(values[0]) or intf.is_sparse(values[1]):
            return values[0] * values[1]
        else:
            return np.matmul(values[0], values[1])

    def shape_from_args(self) -> Tuple[int, ...]:
        """Returns the (row, col) shape of the expression.
        """
        return u.shape.mul_shapes(self.args[0].shape, self.args[1].shape)

    def is_atom_convex(self) -> bool:
        """Multiplication is convex (affine) in its arguments only if one of
        the arguments is constant.
        """
        if u.scopes.dpp_scope_active():
            # This branch applies curvature rules for DPP.
            #
            # Because a DPP scope is active, parameters will be
            # treated as affine (like variables, not constants) by curvature
            # analysis methods.
            #
            # Like under DCP, a product x * y is convex if x or y is constant.
            # If neither x nor y is constant, then the product is DPP
            # if one of the expressions is affine in its parameters and the
            # other is parameter-free.
            x = self.args[0]
            y = self.args[1]
            return ((x.is_constant() or y.is_constant()) or
                    (is_param_affine(x) and is_param_free(y)) or
                    (is_param_affine(y) and is_param_free(x)))
        else:
            return self.args[0].is_constant() or self.args[1].is_constant()

    def is_atom_concave(self) -> bool:
        """If the multiplication atom is convex, then it is affine.
        """
        return self.is_atom_convex()

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return True

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        return False

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?
        """
        # Non-decreasing in one argument when the other is nonnegative.
        return self.args[1-idx].is_nonneg()

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?
        """
        # Non-increasing in one argument when the other is nonpositive.
        return self.args[1-idx].is_nonpos()

    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        if self.args[0].is_constant() or self.args[1].is_constant():
            return super(MulExpression, self)._grad(values)

        # TODO(akshayka): Verify that the following code is correct for
        # non-affine arguments.
        X = values[0]
        Y = values[1]

        # NOTE(review): DX is allocated as a square (X.size, X.size) matrix;
        # confirm this is the intended vectorized-gradient shape for
        # non-square products.
        DX_rows = self.args[0].size
        cols = self.args[0].size

        # DX = [diag(Y11), diag(Y12), ...]
        #      [diag(Y21), diag(Y22), ...]
        #      [  ...        ...      ...]
        DX = sp.dok_matrix((DX_rows, cols))
        for k in range(self.args[0].shape[0]):
            # Place Y on every k-th interleaved row/column band.
            DX[k::self.args[0].shape[0], k::self.args[0].shape[0]] = Y
        DX = sp.csc_matrix(DX)
        # One diagonal block of X.T per output column of the product.
        cols = 1 if len(self.args[1].shape) == 1 else self.args[1].shape[1]
        DY = sp.block_diag([X.T for k in range(cols)], 'csc')

        return [DX, DY]

    def graph_implementation(
        self, arg_objs, shape: Tuple[int, ...], data=None
    ) -> Tuple[lo.LinOp, List[Constraint]]:
        """Multiply the linear expressions.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        shape : tuple
            The shape of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        # Promote shapes for compatibility with CVXCanon
        lhs = arg_objs[0]
        rhs = arg_objs[1]
        # The constant operand determines which (left/right) mul op is used.
        if self.args[0].is_constant():
            return (lu.mul_expr(lhs, rhs, shape), [])
        elif self.args[1].is_constant():
            return (lu.rmul_expr(lhs, rhs, shape), [])
        else:
            raise DCPError("Product of two non-constant expressions is not "
                           "DCP.")
class multiply(MulExpression):
    """ Multiplies two expressions elementwise.
    """

    def __init__(self, lh_expr, rh_expr) -> None:
        # Broadcast the operands to a common shape before constructing
        # the atom.
        lh_expr, rh_expr = self.broadcast(lh_expr, rh_expr)
        super(multiply, self).__init__(lh_expr, rh_expr)

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return True

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        return True

    def is_atom_quasiconvex(self) -> bool:
        """Quasiconvex when one argument is constant or the arguments
        have opposite signs.
        """
        return (
            self.args[0].is_constant() or self.args[1].is_constant()) or (
            self.args[0].is_nonneg() and self.args[1].is_nonpos()) or (
            self.args[0].is_nonpos() and self.args[1].is_nonneg())

    def is_atom_quasiconcave(self) -> bool:
        """Quasiconcave when one argument is constant or the arguments
        have matching signs.
        """
        return (
            self.args[0].is_constant() or self.args[1].is_constant()) or all(
            arg.is_nonneg() for arg in self.args) or all(
            arg.is_nonpos() for arg in self.args)

    def numeric(self, values):
        """Multiplies the values elementwise.
        """
        # Use the sparse elementwise product when either operand is sparse.
        if sp.issparse(values[0]):
            return values[0].multiply(values[1])
        elif sp.issparse(values[1]):
            return values[1].multiply(values[0])
        else:
            return np.multiply(values[0], values[1])

    def shape_from_args(self) -> Tuple[int, ...]:
        """The sum of the argument dimensions - 1.
        """
        return u.shape.sum_shapes([arg.shape for arg in self.args])

    def is_psd(self) -> bool:
        """Is the expression a positive semidefinite matrix?
        """
        return (self.args[0].is_psd() and self.args[1].is_psd()) or \
            (self.args[0].is_nsd() and self.args[1].is_nsd())

    def is_nsd(self) -> bool:
        """Is the expression a negative semidefinite matrix?
        """
        return (self.args[0].is_psd() and self.args[1].is_nsd()) or \
            (self.args[0].is_nsd() and self.args[1].is_psd())

    def graph_implementation(
        self, arg_objs, shape: Tuple[int, ...], data=None
    ) -> Tuple[lo.LinOp, List[Constraint]]:
        """Multiply the expressions elementwise.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        shape : tuple
            The shape of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        # promote if necessary.
        lhs = arg_objs[0]
        rhs = arg_objs[1]
        # lu.multiply expects the constant operand first.
        if self.args[0].is_constant():
            return (lu.multiply(lhs, rhs), [])
        elif self.args[1].is_constant():
            return (lu.multiply(rhs, lhs), [])
        else:
            raise DCPError("Product of two non-constant expressions is not "
                           "DCP.")
class DivExpression(BinaryOperator):
    """Division by scalar.

    Can be created by using the / operator of expression.
    """
    OP_NAME = "/"
    OP_FUNC = np.divide

    def __init__(self, lh_expr, rh_expr) -> None:
        # Broadcast the operands to a common shape before constructing
        # the atom.
        lh_expr, rh_expr = self.broadcast(lh_expr, rh_expr)
        super(DivExpression, self).__init__(lh_expr, rh_expr)

    def numeric(self, values):
        """Divides numerator by denominator.
        """
        # Densify sparse operands in place; np.divide does not accept
        # sparse matrices.
        for i in range(2):
            if sp.issparse(values[i]):
                values[i] = values[i].todense().A
        return np.divide(values[0], values[1])

    def is_quadratic(self) -> bool:
        """Quadratic iff the numerator is quadratic and the divisor constant."""
        return self.args[0].is_quadratic() and self.args[1].is_constant()

    def has_quadratic_term(self) -> bool:
        """Can be a quadratic term if divisor is constant."""
        return self.args[0].has_quadratic_term() and self.args[1].is_constant()

    def is_qpwa(self) -> bool:
        """QPWA iff the numerator is QPWA and the divisor constant."""
        return self.args[0].is_qpwa() and self.args[1].is_constant()

    def shape_from_args(self) -> Tuple[int, ...]:
        """Returns the (row, col) shape of the expression.
        """
        # Division by a scalar preserves the numerator's shape.
        return self.args[0].shape

    def is_atom_convex(self) -> bool:
        """Division is convex (affine) in its arguments only if
        the denominator is constant.
        """
        return self.args[1].is_constant()

    def is_atom_concave(self) -> bool:
        """Affine whenever convex, so the same condition applies."""
        return self.is_atom_convex()

    def is_atom_log_log_convex(self) -> bool:
        """Is the atom log-log convex?
        """
        return True

    def is_atom_log_log_concave(self) -> bool:
        """Is the atom log-log concave?
        """
        return True

    def is_atom_quasiconvex(self) -> bool:
        """Quasiconvex whenever the denominator does not change sign."""
        return self.args[1].is_nonneg() or self.args[1].is_nonpos()

    def is_atom_quasiconcave(self) -> bool:
        """Quasiconcave under the same condition as quasiconvex."""
        return self.is_atom_quasiconvex()

    def is_incr(self, idx) -> bool:
        """Is the composition non-decreasing in argument idx?
        """
        if idx == 0:
            # In the numerator: non-decreasing when dividing by a
            # nonnegative denominator.
            return self.args[1].is_nonneg()
        else:
            # In the denominator: non-decreasing when the numerator is
            # nonpositive.
            return self.args[0].is_nonpos()

    def is_decr(self, idx) -> bool:
        """Is the composition non-increasing in argument idx?
        """
        if idx == 0:
            return self.args[1].is_nonpos()
        else:
            return self.args[0].is_nonneg()

    def graph_implementation(
        self, arg_objs, shape: Tuple[int, ...], data=None
    ) -> Tuple[lo.LinOp, List[Constraint]]:
        """Multiply the linear expressions.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        shape : tuple
            The shape of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        return (lu.div_expr(arg_objs[0], arg_objs[1]), [])
def scalar_product(x, y):
    """Standard inner product ("scalar product") of (x, y), conjugate-linear in x.

    Parameters
    ----------
    x : Expression, int, float, NumPy ndarray, or nested list thereof.
        The conjugate-linear argument to the inner product.
    y : Expression, int, float, NumPy ndarray, or nested list thereof.
        The linear argument to the inner product.

    Returns
    -------
    expr : Expression
        A scalar Expression (``expr.shape == ()``) representing the
        standard inner product of (x, y), conjugate-linear in x.

    Notes
    -----
    ``x`` and ``y`` may be nested lists; each is flattened independently
    of the other.  For instance, with ``x = [[a], [b]]`` and
    ``y = [c, d]`` (where ``a, b, c, d`` are real scalars), the result
    represents ``a * c + b * d``.
    """
    flat_x, flat_y = deep_flatten(x), deep_flatten(y)
    return cvxpy_sum(multiply(conj(flat_x), flat_y))
| {
"content_hash": "bf7c80b6989a706c67f741a3cfe6323e",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 80,
"avg_line_length": 32.350332594235034,
"alnum_prop": 0.5725839616175462,
"repo_name": "merraksh/cvxpy",
"id": "f0e999385c3fddc18a3e34eb05246504199812c0",
"size": "14590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvxpy/atoms/affine/binary_operators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
from pytz import timezone
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.template import loader
from rest_framework.response import Response
from add2cal import Add2Cal
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import (
MentorProgramOfficeHour,
Startup,
)
from ...permissions.v1_api_permissions import (
RESERVE_PERMISSION_DENIED_DETAIL,
IsAuthenticated,
)
from ...views import ADD2CAL_DATE_FORMAT
from .impact_view import ImpactView
from .utils import (
email_template_path,
is_office_hour_reserver,
office_hour_time_info,
datetime_is_in_past,
)
from ...minimal_email_handler import send_email
# Handle to the active Django user model.
User = get_user_model()
# Templates for the reservation notification emails sent to each party.
mentor_template_name = "reserve_office_hour_email_to_mentor.html"
finalist_template_name = "reserve_office_hour_email_to_finalist.html"
# Filename and MIME type of the attached calendar invitation.
ICS_FILENAME = 'reminder.ics'
ICS_FILETYPE = 'text/calendar'
class ReserveOfficeHourView(ImpactView):
    """Reserve an open mentor office-hour session for a finalist.

    POST endpoint.  Validates the requested session, user, and optional
    startup; records the reservation; and emails confirmations (each
    with an attached .ics calendar invitation) to both the mentor and
    the finalist.  Intermediate results are accumulated on ``self``
    (``success``, ``header``, ``detail``, ``timecard_info``) and
    returned by ``_response``.
    """
    view_name = "reserve_office_hour"
    permission_classes = [IsAuthenticated]

    OFFICE_HOUR_TITLE = "Office Hours Session with {}"
    SUCCESS_HEADER = "Office Hour reserved with {}"
    # Fixed message typo (was "This office officehour occurs in the past").
    SUCCESS_PAST_DETAIL = "This office hour occurs in the past"
    FAIL_HEADER = "Office hour could not be reserved"
    NO_OFFICE_HOUR_SPECIFIED = "No office hour was specified"
    NO_SUCH_OFFICE_HOUR = "This office hour is no longer available."
    NO_SUCH_STARTUP = "No such startup exists"
    NO_SUCH_USER = "No such user exists"
    OFFICE_HOUR_ALREADY_RESERVED = "That session has already been reserved"
    SUBJECT = "Office Hours Reservation Notification"
    STARTUP_NOT_ASSOCIATED_WITH_USER = ("The selected startup is not a valid "
                                        "choice for {}")
    USER_CANNOT_RESERVE_OFFICE_HOURS = ("The selected user is not allowed to "
                                        "reserve office hour sessions.")
    # Fixed message typo (was "another existing officehours").
    CONFLICT_EXISTS = ("The requested time overlaps with another "
                       "existing office hour")

    def post(self, request):
        '''
        params:
            office_hour_id (required)
            user_id (optional, defaults to request.user)
            startup_id (optional)
        '''
        # Short-circuit: only attempt the reservation when extraction
        # succeeded.  Both calls record their outcome on self, so the
        # combined boolean result can be discarded.
        (self._extract_request_data(request) and
         self._reserve_office_hour())
        return self._response()

    def _extract_request_data(self, request):
        """Parse and validate the request payload onto self.

        Returns True when the office hour, user, and startup were all
        resolved; a failure response is recorded and False returned
        otherwise.
        """
        if not (self._extract_office_hour(request) and
                self._extract_user(request) and
                self._extract_startup(request)):
            return False
        self.message = request.data.get("message", "")
        return True

    def _extract_office_hour(self, request):
        """Load the requested office hour onto self; fail if absent."""
        office_hour_id = request.data.get("office_hour_id", None)
        if office_hour_id is None:
            self.fail(self.NO_OFFICE_HOUR_SPECIFIED)
            return False
        try:
            self.office_hour = MentorProgramOfficeHour.objects.get(
                pk=office_hour_id)
        except MentorProgramOfficeHour.DoesNotExist:
            self.fail(self.NO_SUCH_OFFICE_HOUR)
            return False
        return True

    def _extract_user(self, request):
        """Resolve the user the reservation is for.

        Staff members may reserve on behalf of another user; all other
        callers may only reserve for themselves.  The chosen user must
        also be eligible to reserve office hours at all.
        """
        user_id = request.data.get("user_id", None)
        if user_id is not None and user_id != request.user.id:
            try:
                self.target_user = User.objects.get(pk=user_id)
            except User.DoesNotExist:
                self.fail(self.NO_SUCH_USER)
                return False
            if is_employee(request.user):
                self.on_behalf_of = True
            else:
                self.fail(RESERVE_PERMISSION_DENIED_DETAIL)
                return False
        else:
            self.target_user = request.user
            self.on_behalf_of = False
        if not is_office_hour_reserver(self.target_user):
            self.fail(self.USER_CANNOT_RESERVE_OFFICE_HOURS)
            return False
        return True

    def _extract_startup(self, request):
        """Resolve the optional startup and verify the user belongs to it."""
        startup_id = request.data.get("startup_id", None)
        if startup_id is None:
            self.startup = None
        else:
            try:
                self.startup = Startup.objects.get(pk=startup_id)
            except Startup.DoesNotExist:
                self.fail(self.NO_SUCH_STARTUP)
                return False
            if not self.target_user.startupteammember_set.filter(
                    startup=self.startup).exists():
                self.fail(self.STARTUP_NOT_ASSOCIATED_WITH_USER.format(
                    self.target_user.email))
                return False
        return True

    def _reserve_office_hour(self):
        """Attempt the reservation; persist and notify on success."""
        if self.office_hour.finalist is not None:
            self.fail(self.OFFICE_HOUR_ALREADY_RESERVED)
            return False
        if self._conflict_exists():
            self.fail(self.CONFLICT_EXISTS)
            return False
        self._update_office_hour_data()
        self._send_confirmation_emails()
        self._succeed()
        return True

    def _conflict_exists(self):
        """Return True if the user already has a session overlapping
        the requested time window."""
        start = self.office_hour.start_date_time
        end = self.office_hour.end_date_time
        # A session conflicts when it starts inside the window, ends
        # inside it, or entirely encloses it.
        start_conflict = (Q(start_date_time__gt=start) &
                          Q(start_date_time__lt=end))
        end_conflict = (Q(end_date_time__gt=start) &
                        Q(end_date_time__lt=end))
        enclosing_conflict = (Q(start_date_time__lte=start) &
                              Q(end_date_time__gte=end))
        if self.target_user.finalist_officehours.filter(
                start_conflict | end_conflict | enclosing_conflict).exists():
            return True
        return False

    def _update_office_hour_data(self):
        """Persist the reservation onto the office hour record."""
        self.office_hour.finalist = self.target_user
        self.office_hour.topics = self.message
        self.office_hour.startup = self.startup
        self.office_hour.save()

    def _send_confirmation_emails(self):
        """Send a confirmation email to the mentor and to the finalist."""
        mentor = self.office_hour.mentor
        finalist = self.target_user
        send_email(**self.prepare_email_notification(mentor,
                                                     finalist,
                                                     mentor_template_name,
                                                     True))
        send_email(**self.prepare_email_notification(finalist,
                                                     mentor,
                                                     finalist_template_name))

    def prepare_email_notification(self,
                                   recipient,
                                   counterpart,
                                   template_name,
                                   mentor_recipient=False):
        """Build the keyword arguments for send_email for one recipient.

        Sets ``self.mentor_recipient`` before building the calendar data
        so the invitation title can name the startup when the mentor is
        the recipient.
        """
        template_path = email_template_path(template_name)
        if self.startup:
            startup_name = self.startup.organization.name
        else:
            startup_name = ""
        self.mentor_recipient = mentor_recipient
        context = {"recipient": recipient,
                   "counterpart": counterpart,
                   "startup": startup_name,
                   "message": self.message,
                   "calendar_data": self.get_calendar_data(counterpart)
                   }
        context.update(office_hour_time_info(self.office_hour))
        html_email = loader.render_to_string(template_path, context)
        return {"to": [recipient.email],
                "subject": self.SUBJECT,
                "body": None,
                "attachment": (ICS_FILENAME,
                               self.calendar_data['ical_content'],
                               ICS_FILETYPE),
                "attach_alternative": (html_email, 'text/html')
                }

    def _succeed(self):
        """Record the success payload and the timecard info for the UI."""
        if self.office_hour.startup:
            startup_name = self.office_hour.startup.organization.name
        else:
            startup_name = ""
        self.success = True
        self.header = self.SUCCESS_HEADER.format(
            self.office_hour.mentor.full_name())
        self.detail = self._get_detail()
        self.timecard_info = {
            "finalist_first_name": self.target_user.first_name,
            "finalist_last_name": self.target_user.last_name,
            "finalist_email": self.target_user.email,
            "topics": self.message,
            "startup": startup_name,
            "calendar_data": self.get_calendar_data(self.office_hour.mentor),
        }

    def _get_detail(self):
        """Warn (via detail text) when the reserved session is in the past."""
        start_date_time = self.office_hour.start_date_time
        if datetime_is_in_past(start_date_time):
            return self.SUCCESS_PAST_DETAIL
        else:
            return ""

    def fail(self, detail):
        """Record a failure response with the given detail message."""
        self.success = False
        self.header = self.FAIL_HEADER
        self.detail = detail
        self.timecard_info = {}

    def _response(self):
        """Assemble the accumulated state into the DRF response."""
        return Response({
            'success': self.success,
            'header': self.header,
            'detail': self.detail,
            'timecard_info': self.timecard_info})

    def get_calendar_data(self, counterpart_name):
        """Build (once) and return the Add2Cal data for this session.

        The first call caches the result on ``self.calendar_data``;
        later calls return the cached event so both notification emails
        attach the same invitation.
        """
        if hasattr(self, "calendar_data"):
            return self.calendar_data
        name = counterpart_name
        if self.mentor_recipient:
            # Mentors see the startup name in the event title when one
            # was supplied.
            name = self.startup.name if self.startup else counterpart_name
        title = self.OFFICE_HOUR_TITLE.format(name)
        office_hour = self.office_hour
        tz_str = ""
        if office_hour.location is None:
            tz_str = "UTC"
            location = ""
        else:
            tz_str = office_hour.location.timezone
            location = office_hour.location
        tz = timezone(tz_str)
        meeting_info = office_hour.meeting_info
        separator = ';' if office_hour.location and meeting_info else ""
        location_info = "{location}{separator}{meeting_info}"
        location_info = location_info.format(location=location,
                                             separator=separator,
                                             meeting_info=meeting_info)
        self.calendar_data = Add2Cal(
            start=office_hour.start_date_time.astimezone(tz).strftime(
                ADD2CAL_DATE_FORMAT),
            end=office_hour.end_date_time.astimezone(tz).strftime(
                ADD2CAL_DATE_FORMAT),
            title=title,
            description=self._get_description(counterpart_name),
            location=location_info,
            timezone=tz).as_dict()
        return self.calendar_data

    def _get_description(self, counterpart_name):
        """Compose the calendar-event description text."""
        topics_block = ""
        attendees_block = """
        Attendees:\n- {mentor_email}\n- {finalist_email} - {finalist_phone}\n
        """
        finalist = self.startup if self.startup else counterpart_name
        if self.office_hour.topics:
            topics_block = "Message from {finalist}:\n{topics}\n".format(
                topics=self.office_hour.topics,
                finalist=finalist)
        mentor_email = self.office_hour.mentor.email
        finalist_email = self.target_user.email
        finalist_phone = self.target_user.user_phone()
        attendees_block = attendees_block.format(mentor_email=mentor_email,
                                                 finalist_email=finalist_email,
                                                 finalist_phone=finalist_phone)
        description = """
        {attendees_block}
        {topics_block}
        """
        return description.format(topics_block=topics_block,
                                  attendees_block=attendees_block)
| {
"content_hash": "7d02248b01484552658e37b30d67d9f3",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 79,
"avg_line_length": 38.86486486486486,
"alnum_prop": 0.5684109874826148,
"repo_name": "masschallenge/impact-api",
"id": "5564355ae1147b0b9fb8552654b9ced22488095b",
"size": "11504",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "web/impact/impact/v1/views/reserve_office_hour_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5077"
},
{
"name": "Dockerfile",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "11542"
},
{
"name": "JavaScript",
"bytes": "2332"
},
{
"name": "Makefile",
"bytes": "17106"
},
{
"name": "Python",
"bytes": "607293"
},
{
"name": "Shell",
"bytes": "5185"
}
],
"symlink_target": ""
} |
'''
Created on Apr 10, 2013
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import time
from arelle import ModelXbrl, XbrlConst, XmlValidate
from arelle.ModelObject import ModelObject
from arelle.ModelDtsObject import ModelAttribute, ModelConcept, ModelType
from arelle.ModelValue import qname
from arelle.Locale import format_string
from lxml import etree
# Canonical location of the W3C "schema for schemas" document.
XMLSchemaURI = "http://www.w3.org/2001/XMLSchema.xsd"
def validate(modelDocument, schemaElement, targetNamespace):
    """Perform semantic checks on an XML Schema document.

    Verifies that QName-valued attributes on xsd:element and
    xsd:attribute declarations (ref, substitutionGroup, type) resolve
    to objects of the expected model class within the namespaces
    declared via import/include.  Resolution failures are reported as
    xmlSchema:valueError on the model.  The W3C schema namespaces
    themselves are skipped.
    """
    modelXbrl = modelDocument.modelXbrl
    modelManager = modelXbrl.modelManager
    # The triple-quoted block below is retained, disabled code from an
    # earlier lxml-based whole-schema validation approach.
    """
    if not hasattr(modelManager, "xmlSchemaSchema"):
        if getattr(modelManager, "modelXmlSchemaIsLoading", False):
            return
        startedAt = time.time()
        modelManager.modelXmlSchemaIsLoading = True
        priorValidateDisclosureSystem = modelManager.validateDisclosureSystem
        modelManager.validateDisclosureSystem = False
        modelManager.xmlSchemaSchema = ModelXbrl.load(modelManager, XMLSchemaURI, _("validate schema"))
        modelManager.validateDisclosureSystem = priorValidateDisclosureSystem
        '''
        filePath = modelManager.cntlr.webCache.getfilename(XMLSchemaURI)
        modelManager.showStatus(_("lxml compiling XML Schema for Schemas"))
        modelManager.xmlSchemaSchema = etree.XMLSchema(file=filePath)
        '''
        modelXbrl.info("info:xmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
                                                                _("schema for XML schemas loaded into lxml %.3f secs"),
                                                                time.time() - startedAt),
                       modelDocument=XMLSchemaURI)
        modelManager.showStatus("")
        del modelManager.modelXmlSchemaIsLoading
    '''
    #startedAt = time.time()
    #validationSuccess = modelManager.xmlSchemaSchema.validate(schemaElement)
    #modelXbrl.info("info:xmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
    #                                                        _("schema validated in %.3f secs"),
    #                                                        time.time() - startedAt),
    #               modelDocument=modelDocument)
    if not validationSuccess:
        for error in modelManager.xmlSchemaSchema.error_log:
            modelXbrl.error("xmlSchema:syntax",
                            _("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
                            modelObject=modelDocument, fileName=modelDocument.basename,
                            error=error.message, line=error.line, column=error.column, sourceAction=("xml schema"))
        modelManager.xmlSchemaSchema._clear_error_log()
    '''
    """
    #XmlValidate.validate(modelXbrl, schemaElement) # use arelle schema validation
    # Namespaces made referenceable by this document's include/import elements.
    declaredNamespaces = set(doc.targetNamespace
                             for doc, docRef in modelDocument.referencesDocument.items()
                             if docRef.referenceType in ("include", "import"))
    if targetNamespace:
        declaredNamespaces.add(targetNamespace)
    if targetNamespace in ("http://www.w3.org/2001/XMLSchema",
                           "http://www.w3.org/XML/1998/namespace",
                           ): # or (
        # targetNamespace and targetNamespace.startswith("http://www.w3.org/1999/xhtml")):
        return # don't validate w3c schemas
    # check schema semantics
    def resolvedQnames(elt, qnDefs):
        # Each entry of qnDefs is (attribute name, expected model class,
        # qname->object index to resolve against, force qualified-form flag).
        for attrName, attrType, mdlObjects, isQualifiedForm in qnDefs:
            attr = elt.get(attrName)
            if attr is not None:
                try:
                    qnValue = elt.schemaNameQname(attr,
                                                  isQualifiedForm=isQualifiedForm or elt.isQualifiedForm,
                                                  prefixException=ValueError)
                    if qnValue.namespaceURI == XbrlConst.xsd:
                        # Built-in xsd names are only legal for type attributes,
                        # and only the listed built-in type names are accepted.
                        if attrType != ModelType:
                            raise ValueError("{0} can not have xml schema namespace".format(attrName))
                        if qnValue.localName not in {
                            "anySimpleType", "anyType",
                            "string", "boolean", "float", "double", "decimal", "duration", "dateTime", "time", "date",
                            "gYearMonth", "gYear", "gMonthDay", "gDay", "gMonth",
                            "hexBinary", "base64Binary",
                            "anyURI", "QName", "NOTATION",
                            "normalizedString", "token", "language",
                            "IDREFS", "ENTITIES", "NMTOKEN", "NMTOKENS", "NCName",
                            "ID", "IDREF",
                            "integer", "nonPositiveInteger", "negativeInteger",
                            "long", "int", "short", "byte",
                            "nonNegativeInteger", "unsignedLong", "unsignedInt", "unsignedShort", "unsignedByte",
                            "positiveInteger"
                        }:
                            raise ValueError("{0} qname {1} not recognized".format(attrName, attr))
                    # qname must be defined in an imported or included schema
                    elif qnValue.namespaceURI and qnValue.namespaceURI not in declaredNamespaces:
                        raise ValueError("Namespace is not defined by an import or include element")
                    elif qnValue not in mdlObjects:
                        raise ValueError("{0} is not defined".format(attrName))
                    elif not isinstance(mdlObjects[qnValue], attrType):
                        raise ValueError("{0} not resolved to expected object type".format(attrName))
                except ValueError as err:
                    # Report resolution failures on the model rather than raising.
                    modelXbrl.error("xmlSchema:valueError",
                                    _("Element attribute %(typeName)s value error: %(value)s, %(error)s"),
                                    modelObject=elt,
                                    typeName=attrName,
                                    value=attr,
                                    error=err)
    def checkSchemaElements(parentElement):
        # Depth-first walk of the schema tree checking each xsd element.
        for elt in parentElement.iterchildren():
            if isinstance(elt,ModelObject) and elt.namespaceURI == XbrlConst.xsd:
                ln = elt.localName
                if ln == "element":
                    resolvedQnames(elt, (("ref", ModelConcept, modelXbrl.qnameConcepts, False),
                                         ("substitutionGroup", ModelConcept, modelXbrl.qnameConcepts, True),
                                         ("type", ModelType, modelXbrl.qnameTypes, True)))
                elif ln == "attribute":
                    resolvedQnames(elt, (("ref", ModelAttribute, modelXbrl.qnameAttributes, False),
                                         ("type", ModelType, modelXbrl.qnameTypes, True)))
                checkSchemaElements(elt)
    checkSchemaElements(schemaElement)
| {
"content_hash": "7c3d0fd70fcf463df22d578c01689dcb",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 123,
"avg_line_length": 55.3046875,
"alnum_prop": 0.559824834016104,
"repo_name": "sternshus/Arelle",
"id": "acd471436187f7c356378b18b8752731a678c599",
"size": "7079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arelle/XmlValidateSchema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31873"
},
{
"name": "C#",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "8640"
},
{
"name": "Java",
"bytes": "4663"
},
{
"name": "Makefile",
"bytes": "5565"
},
{
"name": "NSIS",
"bytes": "9050"
},
{
"name": "PLSQL",
"bytes": "1056360"
},
{
"name": "Python",
"bytes": "5523072"
},
{
"name": "Shell",
"bytes": "13921"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages, setup
# Read the long description from README.md.  The encoding is pinned to
# UTF-8: without it, open() uses the locale's preferred encoding and the
# read fails on systems with a non-UTF-8 locale.
with open(os.path.join(os.path.dirname(__file__), 'README.md'),
          encoding='utf-8') as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-league',
    version='0.1',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='Django app which makes easy to create sports team website.',
    long_description=README,
    url='https://www.aviators.com.pl/',
    author='Michal Gawrys',
    author_email='mgawrys@yourway.pl',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| {
"content_hash": "80c9757ade5f4420af7328d1892800fa",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 37.111111111111114,
"alnum_prop": 0.6302395209580839,
"repo_name": "mgawrys1/django-league",
"id": "a92dfd4a5cc4d44f79b671ccc68a4e73c2323066",
"size": "1336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10337"
},
{
"name": "Python",
"bytes": "38809"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

setup(
    name='pyem410x',
    version='1.0.0',
    description='A python module for encode & decode Em410x.',
    long_description="A python module for encode & decode Em410x.",
    url='https://github.com/yrjyrj123/pyem410x',
    author='yrjyrj123',
    author_email='yrjyrjwp7@hotmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ],
    keywords='em410x t5577 t55xx rfid id card',
    packages=['pyem410x'],
    # Runtime dependency used for bit-level encode/decode.
    install_requires=['bitstring'],
)
"content_hash": "484b74c6b13f85dd0ea4d876aea28b95",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 35.3968253968254,
"alnum_prop": 0.647085201793722,
"repo_name": "yrjyrj123/pyem410x",
"id": "dc07a36e8a43703a8e216b9f34d72379fbd226f8",
"size": "2230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5289"
}
],
"symlink_target": ""
} |
"""
lassie.api
~~~~~~~~~~
This module implements the Lassie API.
"""
from .core import Lassie
def fetch(url, **kwargs):
    """Retrieve *url*, parse its content, and return a dictionary of
    important information about that web page.

    Priority tree for extracted metadata:

        1. Open Graph
        2. Twitter Card
        3. Other meta content (i.e. description, keywords)

    :param url: URL to send a GET request to
    :param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
    :type open_graph: bool
    :param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
    :type twitter_card: bool
    :param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
    :type touch_icon: bool
    :param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
    :type favicon: bool
    :param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
    :type all_images: bool
    :param parser: (optional) String reference for the parser that BeautifulSoup will use
    :type parser: string
    :param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
    :type handle_file_content: bool
    """
    # Delegate to a throwaway Lassie instance; all options pass straight through.
    lassie_client = Lassie()
    return lassie_client.fetch(url, **kwargs)
| {
"content_hash": "6df3dd96cbf36c64b6fa7e6520a1ed25",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 165,
"avg_line_length": 40.707317073170735,
"alnum_prop": 0.695026962252846,
"repo_name": "michaelhelmick/lassie",
"id": "07cd2a2cc825c4b1f2535a6926bf959ebb5e208d",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lassie/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "216232"
},
{
"name": "Python",
"bytes": "47868"
}
],
"symlink_target": ""
} |
"""ROS node for drawing boxes over tip detection results.
"""
import itertools
import cv2
import rospy
from tip_detection import image_to_array, array_to_image
# Import the ROS message type(s) we'll be using.
from sensor_msgs.msg import Image
from tip_detection_msgs.msg import Tips, Tip
class TipHighlighter(object):
    """Pairs incoming camera images with tip-detection results and publishes
    an annotated image whenever the two agree on a timestamp.

    Small most-recent-first buffers of images and detection results are kept
    so messages arriving out of order can still be matched up.

    *publisher* is the ROS publisher the annotated image is written to.
    """

    def __init__(self, publisher):
        self._image_pub = publisher
        # Most-recent-first buffers of incoming messages.
        self._image_buffer = []
        self._tips_buffer = []
        # Sequence number of the last published frame, to avoid double-draw.
        self._last_published_seq = None

    def new_image(self, image):
        """Record a newly received image and look for a matching result."""
        self._image_buffer = [image] + self._image_buffer[:10]
        self._check_for_match()

    def new_tips(self, tips):
        """Record newly received tips and look for a matching image."""
        self._tips_buffer = [tips] + self._tips_buffer[:2]
        self._check_for_match()

    def _check_for_match(self):
        """Find the freshest image/tips pair sharing a timestamp and draw it."""
        pairs = [
            (image, tips)
            for image in self._image_buffer
            for tips in self._tips_buffer
            if image.header.stamp == tips.header.stamp
        ]
        if not pairs:
            return
        # Prefer the newest image (highest sequence number).
        image, tips = max(pairs, key=lambda pair: pair[0].header.seq)
        # Don't publish the same frame twice.
        if image.header.seq == self._last_published_seq:
            return
        self._draw_tips(image, tips)

    def _draw_tips(self, image, tips):
        """Draw a circle around each detected tip in *image*, publish the
        combined result, and remember the image's sequence number."""
        rgb_im = image_to_array(image)
        for tip in tips.tips:
            roi = tip.roi
            # (0, 0) offsets are skipped -- presumably a "no detection"
            # sentinel; TODO confirm against the detector node.
            # NB: cv2.circle modifies rgb_im in place.
            if (roi.x_offset, roi.y_offset) != (0, 0):
                cv2.circle(
                    rgb_im,
                    (roi.x_offset, roi.y_offset),
                    5,            # radius
                    (0, 255, 0),  # green
                    2             # thickness
                )
        self._image_pub.publish(array_to_image(rgb_im))
        self._last_published_seq = image.header.seq
def main():
    """Entry point: wire up the highlighter node and spin forever."""
    rospy.init_node('highlight_tips')
    # Output topic carrying the annotated camera image.
    image_pub = rospy.Publisher(rospy.get_name() + '/image_raw', Image,
                                queue_size=1)
    highlighter = TipHighlighter(image_pub)
    # Feed the highlighter from the camera and from the tip detector.
    rospy.Subscriber('camera/image_raw', Image, highlighter.new_image,
                     queue_size=1)
    rospy.Subscriber('tip_detect/tips', Tips, highlighter.new_tips,
                     queue_size=1)
    # Blocks until the node is shut down.
    rospy.spin()


if __name__ == '__main__':
    main()
| {
"content_hash": "a2409df11be87583861f3a72be82b308",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 81,
"avg_line_length": 32.54032258064516,
"alnum_prop": 0.6173482032218092,
"repo_name": "sigproc/robotic_surgery",
"id": "61aa76cc5bf1ef6d6dc2a93a7ec57649ff71647c",
"size": "5195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ros/tip_detection/scripts/highlight_tips.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "95098"
},
{
"name": "CMake",
"bytes": "87588"
},
{
"name": "Makefile",
"bytes": "6460"
},
{
"name": "Python",
"bytes": "208268"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
} |
from django.db.models import Lookup, Transform
class PostgresSimpleLookup(Lookup):
    """Base lookup that joins its two compiled sides with a single SQL
    operator. Subclasses only provide ``lookup_name`` and ``operator``."""

    def as_sql(self, qn, connection):
        compiled_lhs, lhs_params = self.process_lhs(qn, connection)
        compiled_rhs, rhs_params = self.process_rhs(qn, connection)
        sql = '%s %s %s' % (compiled_lhs, self.operator, compiled_rhs)
        return sql, lhs_params + rhs_params
class DataContains(PostgresSimpleLookup):
    # PostgreSQL "left contains right" operator.
    lookup_name = 'contains'
    operator = '@>'


class ContainedBy(PostgresSimpleLookup):
    # PostgreSQL "left is contained by right" operator.
    lookup_name = 'contained_by'
    operator = '<@'


class Overlap(PostgresSimpleLookup):
    # PostgreSQL "arrays have elements in common" operator.
    lookup_name = 'overlap'
    operator = '&&'


class HasKey(PostgresSimpleLookup):
    # PostgreSQL "contains the given key" operator.
    lookup_name = 'has_key'
    operator = '?'


class HasKeys(PostgresSimpleLookup):
    # PostgreSQL "contains all of the given keys" operator.
    lookup_name = 'has_keys'
    operator = '?&'


class HasAnyKeys(PostgresSimpleLookup):
    # PostgreSQL "contains any of the given keys" operator.
    lookup_name = 'has_any_keys'
    operator = '?|'


class Unaccent(Transform):
    # bilateral: the UNACCENT function is applied to both sides of the
    # comparison, so accented and unaccented forms match each other.
    bilateral = True
    lookup_name = 'unaccent'
    function = 'UNACCENT'
| {
"content_hash": "6687626e231b1283fe10ccb2f6db051a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 61,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.6319845857418112,
"repo_name": "yephper/django",
"id": "6976d1d8f1c170a464f6d5fd5fd4c0061363140e",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/postgres/lookups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""Pyramid bootstrap environment. """
from alembic import context
from pyramid.paster import get_appsettings, setup_logging
from sqlalchemy import engine_from_config
from amnesia.db.meta import metadata
config = context.config
setup_logging(config.config_file_name)
settings = get_appsettings(config.config_file_name, 'amnesia')
target_metadata = metadata
def run_migrations_offline():
    """Emit migration SQL without connecting to a database.

    Only the database URL is handed to Alembic, so no DBAPI has to be
    importable; ``context.execute()`` calls simply write the given string
    to the script output.
    """
    context.configure(url=settings['sqlalchemy.url'])
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations against a live database connection.

    Builds an Engine from the 'sqlalchemy.'-prefixed settings and binds a
    connection to the Alembic context for the duration of the run.
    """
    engine = engine_from_config(settings, prefix='sqlalchemy.')
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        connection.close()
# Alembic executes this module directly; dispatch on the mode it was
# started in (``--sql`` produces offline mode).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| {
"content_hash": "ceb7cede280276b6a2411ce5e9553fec",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 64,
"avg_line_length": 25.103448275862068,
"alnum_prop": 0.7012362637362637,
"repo_name": "silenius/amnesia",
"id": "dc571bfcc912b1a5d5846c45c944478792a8ef3a",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amnesia/alembic/env.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "260179"
},
{
"name": "HTML",
"bytes": "14462"
},
{
"name": "JavaScript",
"bytes": "113808"
},
{
"name": "Mako",
"bytes": "806"
},
{
"name": "PLpgSQL",
"bytes": "18006"
},
{
"name": "Python",
"bytes": "274296"
}
],
"symlink_target": ""
} |
from .method import wrap_method
from .attribute import Attribute
from .device import Device
from .pausableDevice import PausableDevice
from .runnableDevice import DState, DEvent, RunnableDevice
| {
"content_hash": "ae1378b7da54bf290eaa25a25047ccfc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 58,
"avg_line_length": 38.8,
"alnum_prop": 0.845360824742268,
"repo_name": "ulrikpedersen/malcolm",
"id": "1f73096c1baa4d5882f4159c5a66eb60c9b8c49a",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malcolm/core/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "150960"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
# NOTE(review): this file appears to be auto-generated by catkin
# (it lives under build/catkin_generated) - manual edits may be overwritten.
from __future__ import print_function

import argparse
import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/gl/catkin_ws/devel;/opt/ros/indigo".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Capture the environment produced by sourcing devel/env.sh ...
code = generate_environment_script('/home/gl/catkin_hrg/devel/env.sh')

# ... and write it out as the cached setup script.
output_filename = '/home/gl/catkin_hrg/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Mark the generated script owner-executable so it can be run directly.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| {
"content_hash": "0a17e904df2b2f3ff86d1f4de8a64dc6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 102,
"avg_line_length": 43.6551724137931,
"alnum_prop": 0.717219589257504,
"repo_name": "iglstone/catkin_hrg",
"id": "b6d70a7590fdbac5c68f430337227eb9376c1df5",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/catkin_generated/generate_cached_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11935"
},
{
"name": "C++",
"bytes": "146659"
},
{
"name": "CMake",
"bytes": "284245"
},
{
"name": "Common Lisp",
"bytes": "44930"
},
{
"name": "Makefile",
"bytes": "279132"
},
{
"name": "NewLisp",
"bytes": "87624"
},
{
"name": "Python",
"bytes": "232641"
},
{
"name": "Shell",
"bytes": "12123"
}
],
"symlink_target": ""
} |
from twisted.test import proto_helpers
from twisted.trial import unittest
from joker.bot import JokerBotFactory
def _common_setup(self):
    # Shared fixture for the test cases below: build the bot from its
    # factory, attach it to an in-memory transport, and simulate sign-on.
    self.factory = JokerBotFactory()
    self.bot = self.factory.buildProtocol(('127.0.0.1', 0))
    self.fake_transport = proto_helpers.StringTransport()
    self.bot.makeConnection(self.fake_transport)
    self.bot.signedOn()
    # Discard anything written during sign-on so each test starts with an
    # empty transport buffer.
    self.fake_transport.clear()
class JokerBotTestCase(unittest.SynchronousTestCase):
    """Behavioural tests for the joker IRC bot's channel greetings."""

    _channel = "#testchannel"
    _username = "tester"
    _us = 'tbb'

    def setUp(self):
        _common_setup(self)

    def test_when_join_a_channel_bot_must_send_greetings_to_everyone(self):
        self.bot.joined(self._channel)
        expected = 'PRIVMSG {channel} :{message}\r\n'.format(
            channel=self._channel, username=self._username,
            message='Bom dia!')
        self.assertEqual(expected, self.fake_transport.value())

    def test_when_someone_join_a_channel_bot_must_send_a_greeting_to_her(self):
        test_user = "arthur"
        self.bot.userJoined(test_user, self._channel)
        expected = 'PRIVMSG {channel} :{message}\r\n'.format(
            channel=self._channel, username=test_user,
            message='Bom dia, {0}!'.format(test_user))
        self.assertEqual(expected, self.fake_transport.value())
| {
"content_hash": "abca09a49a0f1837f5666bd3e5e3a789",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 182,
"avg_line_length": 36.57575757575758,
"alnum_prop": 0.6992543496271748,
"repo_name": "rennerocha/joker",
"id": "ad5fbd8a1134712bb3483571b26dddf462060659",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joker/test/test_joker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1851"
}
],
"symlink_target": ""
} |
import sys
import os

# Make the project root importable when this script is run directly.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)

import matplotlib as mpl
# Select the non-interactive Agg backend *before* pyplot is imported below.
mpl.use("Agg")

import argparse
import h5py
import itertools
import numpy
import pylru
from multiprocessing import Process, Array, Queue
import ctypes

import arl.test_support
from crocodile.synthesis import *
import util.visualize
import matplotlib.pyplot as plt

# Parse arguments
# NOTE(review): the description says 'Grid a data set' but the script plots
# uv coverage - probably copied from a sibling script; confirm and fix.
parser = argparse.ArgumentParser(description='Grid a data set')
parser.add_argument('input', metavar='input', type=argparse.FileType('r'),
                    help='input visibilities')
parser.add_argument('--lambda', dest='lam', type=float,
                    help='Size of uvw-plane')
parser.add_argument('--out', dest='out', type=argparse.FileType('w'),
                    help='Output image')
args = parser.parse_args()

# Open input file
print("Reading %s..." % args.input.name)
input = h5py.File(args.input.name, "r")

# Get baselines
print("Reading baselines...")
viss = arl.test_support.import_visibility_baselines_from_hdf5(input)
print("Got %d visibility chunks" % len(viss))


# Utility to collect data from visibility blocks
def collect_blocks(prop):
    # Evaluates prop(vis, chan) for every chunk and channel, then stacks
    # everything into a single 2-D array (rows = visibilities).
    result = []
    for vis in viss:
        vres = []
        for chan in range(len(vis.frequency)):
            vres.append(prop(vis, chan))
        result.append(numpy.vstack(numpy.transpose(vres, (1, 0, 2))))
    return numpy.vstack(result)


# Per-visibility (u, v, w) coordinates in wavelengths.
uvw = collect_blocks(lambda vis, chan: vis.uvw_lambda(chan))

# Show statistics
print()
print("Have %d visibilities" % uvw.shape[0])
print("u range: %.2f - %.2f lambda" % (numpy.min(uvw[:, 0]), numpy.max(uvw[:, 0])))
print("v range: %.2f - %.2f lambda" % (numpy.min(uvw[:, 1]), numpy.max(uvw[:, 1])))
print("w range: %.2f - %.2f lambda" % (numpy.min(uvw[:, 2]), numpy.max(uvw[:, 2])))
print()

# Plot each point and its Hermitian mirror (-u, -v, -w), coloured by w.
plt.scatter(uvw[:, 0], uvw[:, 1], c=uvw[:, 2], lw=0, s=.01)
plt.scatter(-uvw[:, 0], -uvw[:, 1], c=-uvw[:, 2], lw=0, s=.01)
plt.xlabel('u [lambda]')
plt.ylabel('v [lambda]')
if args.lam is not None:
    # Clip the plot to the requested uvw-plane size.
    plt.xlim(-args.lam / 2, args.lam / 2)
    plt.ylim(-args.lam / 2, args.lam / 2)
plt.colorbar()
if args.out is not None:
    plt.savefig(args.out.name, dpi=1200)
else:
    # With the Agg backend this will not open a window; kept for parity
    # with interactive use.
    plt.show()
| {
"content_hash": "0aaa3c5ebd8a18ff4fd6283fbe7c876e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 85,
"avg_line_length": 29.986486486486488,
"alnum_prop": 0.6566020730058585,
"repo_name": "SKA-ScienceDataProcessor/crocodile",
"id": "208254f71b0ad593f1b0789baf476db554c9c055",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/uvw_coverage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161449"
}
],
"symlink_target": ""
} |
"""
Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
You need to find the shortest such subarray and output its length.
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
Note:
Then length of the input array is in range [1, 10,000].
The input array may contain duplicates, so ascending order here means <=.
"""
class Solution(object):
    def findUnsortedSubarray(self, nums):
        """Return the length of the shortest contiguous subarray which,
        if sorted in ascending order, would leave the whole array sorted.
        Returns 0 when the array is already sorted.

        Replaces the original element-by-element copy + flag/while logic
        with a sorted() baseline and two simple scans.

        :type nums: List[int]
        :rtype: int
        """
        ordered = sorted(nums)
        # Skip the longest prefix already in its sorted position.
        low = 0
        while low < len(nums) and nums[low] == ordered[low]:
            low += 1
        if low == len(nums):
            # Every element matched: already sorted.
            return 0
        # Skip the longest suffix already in its sorted position.
        high = len(nums) - 1
        while high > low and nums[high] == ordered[high]:
            high -= 1
        return high - low + 1
if __name__ == "__main__":
sample = Solution()
print(sample.findUnsortedSubarray([2]))
| {
"content_hash": "91a2037f390afb3cefb9250e32dd648a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 181,
"avg_line_length": 28.5531914893617,
"alnum_prop": 0.53725782414307,
"repo_name": "Vonzpf/LeetCode",
"id": "c4d95a6ca23c7bf26a7b880fd64b970709943113",
"size": "1380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ShortestUnsortedContinuousSubarray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "25807"
},
{
"name": "Python",
"bytes": "43800"
}
],
"symlink_target": ""
} |
""" Logging utility"""
import os, errno
import logging
from logging.handlers import RotatingFileHandler
DEFAULT_FORMAT = logging.Formatter('%(asctime)s - '
'%(filename)s:%(funcName)s '
'%(levelname)s - '
'%(lineno)d:\t'
'- %(message)s')
DEFAULT_LOGGER = logging.getLogger("ksiga")
DEFAULT_LOGGER.addHandler(logging.StreamHandler())
DEFAULT_LOGGER.setLevel(logging.INFO)
def getLogger(name, level=logging.INFO, logformat=DEFAULT_FORMAT):
    """Return a named logger with a stream handler attached.

    The logger itself is set to DEBUG so that the handler's *level*
    decides what is actually emitted.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logformat)
    logger.addHandler(handler)

    # A rotating file handler (logs/kali.log) used to be attached here as
    # well; see the repository history if file logging is to be restored.
    return logger
def conditional_logging(logger, method, message):
    """Log *message* via ``getattr(logger, method)`` only when the
    module-level ``LOGGING`` flag has been set to ``True``.

    Bug fix: the original declared ``global LOGGING`` and read it
    directly, but this module never defines ``LOGGING``, so every call
    raised ``NameError``. A safe ``globals()`` lookup keeps the intended
    "opt-in via module attribute" behaviour without crashing.
    """
    if globals().get("LOGGING") is True:
        logging_method = getattr(logger, method)
        logging_method(message)
def debug(message):
    """Log *message* at DEBUG level on the package logger.

    Bug fix: the original called ``DEFAULT_LOGGER.debu(message)`` (typo),
    which raised ``AttributeError`` on every call; the stray ``pass``
    after it was dead code.
    """
    DEFAULT_LOGGER.debug(message)


def info(message):
    """Log *message* at INFO level on the package logger."""
    DEFAULT_LOGGER.info(message)


def warning(message):
    """Log *message* at WARNING level on the package logger."""
    DEFAULT_LOGGER.warning(message)


def notify(message):
    """Log *message* at INFO level on the package logger.

    Bug fix: ``logging.Logger`` has no ``notify()`` method, so the
    original call always raised ``AttributeError``; INFO is the closest
    standard level.
    """
    DEFAULT_LOGGER.info(message)
"content_hash": "8a5d0038e263f9e9abb35ee6fcd0f2b5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 93,
"avg_line_length": 23.941176470588236,
"alnum_prop": 0.6253071253071253,
"repo_name": "yumyai/ksiga",
"id": "22b642e354a8d8a172702eb06cd87406d9c46412",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ksiga/logutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "146"
},
{
"name": "Python",
"bytes": "57019"
}
],
"symlink_target": ""
} |
import os
import numpy
import glob
import shutil
import codecs
import time
import sys

# Silence caffe's verbose GLOG output; must be set *before* caffe is
# imported for it to take effect.
os.environ['GLOG_minloglevel'] = '2'
import caffe

from caffe.proto import caffe_pb2
from google.protobuf import text_format
from scipy.misc import imresize, imread
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import zoom
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from collections import namedtuple

import upsample
import rotate
import expdir

# Run the network on GPU 0.
caffe.set_mode_gpu()
caffe.set_device(0)
def create_probe(
        directory, dataset, definition, weights, mean, blobs,
        colordepth=3,
        rotation_seed=None, rotation_power=1,
        limit=None, split=None,
        batch_size=16, ahead=4,
        cl_args=None, verbose=True):
    """Run a caffe network over a segmentation dataset and record the
    activations of the requested blobs into per-blob mmap files under
    `directory` (see the parameter notes in the string literal below).
    """
    # If we're already done, skip it!
    ed = expdir.ExperimentDirectory(directory)
    if all(ed.has_mmap(blob=b) for b in blobs):
        return
    '''
    directory: where to place the probe_conv5.mmap files.
    data: the AbstractSegmentation data source to draw upon
    definition: the filename for the caffe prototxt
    weights: the filename for the caffe model weights
    mean: to use to normalize rgb values for the network
    blobs: ['conv3', 'conv4', 'conv5'] to probe
    '''
    if verbose:
        print 'Opening dataset', dataset
    # NOTE(review): this reads the module-global `args` (set in the
    # __main__ block) instead of the `dataset` parameter - it only works
    # when run as a script. Should probably be `dataset`; confirm and fix.
    data = loadseg.SegmentationData(args.dataset)
    if verbose:
        print 'Opening network', definition
    # NB: `np` here is a caffe NetParameter, not the usual numpy alias
    # (this file imports numpy as `numpy`).
    np = caffe_pb2.NetParameter()
    with open(definition, 'r') as dfn_file:
        text_format.Merge(dfn_file.read(), np)
    net = caffe.Net(definition, weights, caffe.TEST)
    input_blob = net.inputs[0]
    input_dim = net.blobs[input_blob].data.shape[2:]
    data_size = data.size(split)
    if limit is not None:
        data_size = min(data_size, limit)
    # Make sure we have a directory to work in
    ed.ensure_dir()
    # Step 0: write a README file with generated information.
    ed.save_info(dict(
        dataset=dataset,
        split=split,
        definition=definition,
        weights=weights,
        mean=mean,
        blobs=blobs,
        input_dim=input_dim,
        rotation_seed=rotation_seed,
        rotation_power=rotation_power))
    # Clear old probe data
    ed.remove_all('*.mmap*')
    # Create new (empty) mmaps
    if verbose:
        print 'Creating new mmaps.'
    out = {}
    rot = None
    if rotation_seed is not None:
        rot = {}
    for blob in blobs:
        # One mmap per blob: (num_images, channels, ...) per the net's
        # own blob shape.
        shape = (data_size, ) + net.blobs[blob].data.shape[1:]
        out[blob] = ed.open_mmap(blob=blob, mode='w+', shape=shape)
        # Find the shortest path through the network to the target blob
        fieldmap, _ = upsample.composed_fieldmap(np.layer, blob)
        # Compute random rotation for each blob, if needed
        if rot is not None:
            rot[blob] = rotate.randomRotationPowers(
                shape[1], [rotation_power], rotation_seed)[0]
        ed.save_info(blob=blob, data=dict(
            name=blob, shape=shape, fieldmap=fieldmap))
    # The main loop
    if verbose:
        print 'Beginning work.'
    pf = loadseg.SegmentationPrefetcher(data, categories=['image'],
                                        split=split, once=True, batch_size=batch_size, ahead=ahead)
    index = 0
    start_time = time.time()
    last_batch_time = start_time
    # NB: from here on `batch_size` is reused as the *actual* size of the
    # current batch, shadowing the parameter (already consumed above).
    batch_size = 0
    for batch in pf.tensor_batches(bgr_mean=mean):
        batch_time = time.time()
        rate = index / (batch_time - start_time + 1e-15)
        batch_rate = batch_size / (batch_time - last_batch_time + 1e-15)
        last_batch_time = batch_time
        if verbose:
            print 'netprobe index', index, 'items per sec', batch_rate, rate
            sys.stdout.flush()
        inp = batch[0]
        batch_size = len(inp)
        if limit is not None and index + batch_size > limit:
            # Truncate last if limited
            batch_size = limit - index
            inp = inp[:batch_size]
        if colordepth == 1:
            # Collapse RGB to grayscale by averaging the channel axis.
            inp = numpy.mean(inp, axis=1, keepdims=True)
        net.blobs[input_blob].reshape(*(inp.shape))
        net.blobs[input_blob].data[...] = inp
        result = net.forward(blobs=blobs)
        if rot is not None:
            # Apply the random channel rotation along axis 1.
            for key in out.keys():
                result[key] = numpy.swapaxes(numpy.tensordot(
                    rot[key], result[key], axes=((1,), (1,))), 0, 1)
        # print 'Computation done'
        for key in out.keys():
            out[key][index:index + batch_size] = result[key]
        # print 'Recording data in mmap done'
        index += batch_size
        if index >= data_size:
            break
    assert index == data_size, (
        "Data source should return evey item once %d %d." %
        (index, data_size))
    if verbose:
        print 'Renaming mmaps.'
    for blob in blobs:
        ed.finish_mmap(out[blob])
    # Final step: write the README file
    write_readme_file([
        ('cl_args', cl_args),
        ('data', data),
        ('definition', definition),
        ('weight', weights),
        ('mean', mean),
        ('blobs', blobs)], ed, verbose=verbose)
def ensure_dir(targetdir):
if not os.path.isdir(targetdir):
try:
os.makedirs(targetdir)
except:
print 'Could not create', targetdir
pass
def write_readme_file(args, ed, verbose):
'''
Writes a README.txt that describes the settings used to geenrate the ds.
'''
with codecs.open(ed.filename('README.txt'), 'w', 'utf-8') as f:
def report(txt):
f.write('%s\n' % txt)
if verbose:
print txt
title = '%s network probe' % ed.basename()
report('%s\n%s' % (title, '=' * len(title)))
for key, val in args:
if key == 'cl_args':
if val is not None:
report('Command-line args:')
for ck, cv in vars(val).items():
report(' %s: %r' % (ck, cv))
report('%s: %r' % (key, val))
report('\ngenerated at: %s' % time.strftime("%Y-%m-%d %H:%M"))
try:
label = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
report('git label: %s' % label)
except:
pass
if __name__ == '__main__':
    import sys
    import traceback
    import argparse
    try:
        import loadseg

        parser = argparse.ArgumentParser(
            description='Probe a caffe network and save results in a directory.')
        parser.add_argument(
            '--directory',
            default='.',
            help='output directory for the net probe')
        parser.add_argument(
            '--blobs',
            nargs='*',
            help='network blob names to collect')
        parser.add_argument(
            '--definition',
            help='the deploy prototext defining the net')
        parser.add_argument(
            '--weights',
            help='the caffemodel file of weights for the net')
        parser.add_argument(
            '--mean',
            nargs='*', type=float,
            help='mean values to subtract from input')
        parser.add_argument(
            '--dataset',
            help='the directory containing the dataset to use')
        parser.add_argument(
            '--split',
            help='the split of the dataset to use')
        parser.add_argument(
            '--limit',
            type=int, default=None,
            help='limit dataset to this size')
        parser.add_argument(
            '--batch_size',
            type=int, default=256,
            help='the batch size to use')
        parser.add_argument(
            '--ahead',
            type=int, default=4,
            help='number of batches to prefetch')
        parser.add_argument(
            '--rotation_seed',
            type=int, default=None,
            help='the seed for the random rotation to apply')
        parser.add_argument(
            '--rotation_power',
            type=float, default=1.0,
            help='the power of hte random rotation')
        parser.add_argument(
            '--colordepth',
            type=int, default=3,
            help='set to 1 for grayscale')
        args = parser.parse_args()

        create_probe(
            args.directory, args.dataset, args.definition, args.weights,
            numpy.array(args.mean, dtype=numpy.float32), args.blobs,
            batch_size=args.batch_size, ahead=args.ahead, limit=args.limit,
            colordepth=args.colordepth,
            rotation_seed=args.rotation_seed, rotation_power=args.rotation_power,
            split=args.split, cl_args=args, verbose=True)
    except:
        # Deliberate top-level catch-all: print the traceback and exit
        # with a nonzero status so batch drivers notice the failure.
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
"content_hash": "4c3488446739d1e2415ba502884e3ef9",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 99,
"avg_line_length": 33.77220077220077,
"alnum_prop": 0.5735680804847376,
"repo_name": "bonyuta0204/NetDissec",
"id": "850cfe36de8899f1d2b3a9a407814d9837c8aa1d",
"size": "8770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/netprobe_original.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8261007"
},
{
"name": "Matlab",
"bytes": "17759"
},
{
"name": "Python",
"bytes": "283347"
},
{
"name": "Shell",
"bytes": "34272"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import binascii
import datetime
import graphene
from graphene.types.datetime import Date, Time, DateTime
from graphene.utils.str_converters import to_camel_case
from graphql.language import ast
def factory_type(operation, _type, *args, **kwargs):
    # Build a graphene type on the fly by subclassing `_type` with a Meta
    # class assembled from **kwargs. `operation` selects the flavour:
    # "output" (query type), "input" (mutation input; args[0] names what
    # the input is for), or "list" (paginated list type). Returns None
    # for any other operation.
    if operation == "output":
        class GenericType(_type):
            class Meta:
                model = kwargs.get("model")
                name = kwargs.get("name") or to_camel_case(
                    "{}_Generic_Type".format(kwargs.get("model").__name__)
                )
                only_fields = kwargs.get("only_fields")
                exclude_fields = kwargs.get("exclude_fields")
                include_fields = kwargs.get("include_fields")
                filter_fields = kwargs.get("filter_fields")
                filterset_class = kwargs.get("filterset_class")
                registry = kwargs.get("registry")
                skip_registry = kwargs.get("skip_registry")
                # fields = kwargs.get('fields')
                description = "Auto generated Type for {} model".format(
                    kwargs.get("model").__name__
                )
        return GenericType
    elif operation == "input":
        class GenericInputType(_type):
            class Meta:
                model = kwargs.get("model")
                # args[0] is baked into the type name as well, so each
                # input-for variant gets a distinct schema name.
                name = kwargs.get("name") or to_camel_case(
                    "{}_{}_Generic_Type".format(kwargs.get("model").__name__, args[0])
                )
                only_fields = kwargs.get("only_fields")
                exclude_fields = kwargs.get("exclude_fields")
                nested_fields = kwargs.get("nested_fields")
                registry = kwargs.get("registry")
                skip_registry = kwargs.get("skip_registry")
                input_for = args[0]
                description = "Auto generated InputType for {} model".format(
                    kwargs.get("model").__name__
                )
        return GenericInputType
    elif operation == "list":
        class GenericListType(_type):
            class Meta:
                model = kwargs.get("model")
                name = kwargs.get("name") or to_camel_case(
                    "{}_List_Type".format(kwargs.get("model").__name__)
                )
                only_fields = kwargs.get("only_fields")
                exclude_fields = kwargs.get("exclude_fields")
                filter_fields = kwargs.get("filter_fields")
                filterset_class = kwargs.get("filterset_class")
                results_field_name = kwargs.get("results_field_name")
                pagination = kwargs.get("pagination")
                queryset = kwargs.get("queryset")
                registry = kwargs.get("registry")
                description = "Auto generated list Type for {} model".format(
                    kwargs.get("model").__name__
                )
        return GenericListType
    return None
class DjangoListObjectBase(object):
    """Container pairing a page of results with the total match count."""

    def __init__(self, results, count, results_field_name="results"):
        # Attributes are public: list-type resolvers read them directly.
        self.results = results
        self.count = count
        self.results_field_name = results_field_name

    def to_dict(self):
        """Serialize to a plain dict, delegating to each item's to_dict()."""
        serialized_items = [item.to_dict() for item in self.results]
        return {self.results_field_name: serialized_items, "count": self.count}
def resolver(attr_name, root, instance, info):
    """Resolve GenericForeignKeyType fields straight off the model instance.

    Returns None for any attribute name it does not recognize.
    """
    if attr_name == "id":
        return instance.id
    if attr_name == "app_label":
        return instance._meta.app_label
    if attr_name == "model_name":
        return instance._meta.model.__name__
    return None
class GenericForeignKeyType(graphene.ObjectType):
    # Read-only representation of a GenericForeignKey: the target model is
    # identified by (app_label, model_name) plus the object's primary key.
    app_label = graphene.String()
    id = graphene.ID()
    model_name = graphene.String()
    class Meta:
        description = " Auto generated Type for a model's GenericForeignKey field "
        # All three fields above are resolved by the module-level `resolver`.
        default_resolver = resolver
class GenericForeignKeyInputType(graphene.InputObjectType):
    # Input counterpart of GenericForeignKeyType; all three identifiers are
    # mandatory so the target object can be located unambiguously.
    app_label = graphene.Argument(graphene.String, required=True)
    id = graphene.Argument(graphene.ID, required=True)
    model_name = graphene.Argument(graphene.String, required=True)
    class Meta:
        description = " Auto generated InputType for a model's GenericForeignKey field "
# ************************************************ #
# ************** CUSTOM BASE TYPES *************** #
# ************************************************ #
class Binary(graphene.Scalar):
    """
    BinaryArray is used to convert a Django BinaryField to the string form
    """

    @staticmethod
    def binary_to_string(value):
        # hexlify returns bytes; decode to a plain unicode string.
        return binascii.hexlify(value).decode("utf-8")

    serialize = binary_to_string
    parse_value = binary_to_string

    @classmethod
    def parse_literal(cls, node):
        # Only string literals can carry binary data; anything else
        # yields None (unparsed).
        if not isinstance(node, ast.StringValue):
            return None
        return cls.binary_to_string(node.value)
class CustomDateFormat(object):
    # Wrapper marking an already-formatted date/time string that should be
    # passed through serialization unchanged.
    def __init__(self, date):
        self.date_str = date
class CustomTime(Time):
    """Time scalar that lets CustomDateFormat strings pass through as-is."""

    @staticmethod
    def serialize(time):
        # Pre-formatted values bypass isoformat conversion entirely.
        if isinstance(time, CustomDateFormat):
            return time.date_str
        # Accept full datetimes by extracting their time component.
        value = time.time() if isinstance(time, datetime.datetime) else time
        assert isinstance(
            value, datetime.time
        ), 'Received not compatible time "{}"'.format(repr(value))
        return value.isoformat()
class CustomDate(Date):
    """Date scalar that lets CustomDateFormat strings pass through as-is."""

    @staticmethod
    def serialize(date):
        # Pre-formatted values bypass isoformat conversion entirely.
        if isinstance(date, CustomDateFormat):
            return date.date_str
        # Accept full datetimes by extracting their date component.
        value = date.date() if isinstance(date, datetime.datetime) else date
        assert isinstance(
            value, datetime.date
        ), 'Received not compatible date "{}"'.format(repr(value))
        return value.isoformat()
class CustomDateTime(DateTime):
    """DateTime scalar that lets CustomDateFormat strings pass through."""

    @staticmethod
    def serialize(dt):
        # Pre-formatted values bypass isoformat conversion entirely.
        if isinstance(dt, CustomDateFormat):
            return dt.date_str
        is_datetime_like = isinstance(dt, (datetime.datetime, datetime.date))
        assert is_datetime_like, 'Received not compatible datetime "{}"'.format(
            repr(dt)
        )
        return dt.isoformat()
| {
"content_hash": "2f498a8e92f519d0f9494da6d7407ef8",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 88,
"avg_line_length": 32.627027027027026,
"alnum_prop": 0.568754141815772,
"repo_name": "eamigo86/graphene-django-extras",
"id": "e78c47de35bc691199c1737c46db8d1a5cc625a1",
"size": "6060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphene_django_extras/base_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148342"
}
],
"symlink_target": ""
} |
from PyQt4.QtCore import *
a = QString("apple")
b = unicode("baker")  # Python 2 unicode string
# Concatenating a QString with a Python 2 unicode works via QString's '+'.
print(a + b)
# Prints the concrete result type to show which operand's type wins
# (presumably QString — run under PyQt4 to confirm).
print(type(a + b))
"content_hash": "f95075a42e53ad7b9359f4a04ed241e7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 20,
"alnum_prop": 0.65,
"repo_name": "quanhua92/learning-notes",
"id": "64d8665c7ba9ed2801132f930aed8f8a3cec44ae",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/rapid_gui_pyqt4/chapter_01/qstring_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1491"
},
{
"name": "C++",
"bytes": "578330"
},
{
"name": "CMake",
"bytes": "2988"
},
{
"name": "CSS",
"bytes": "63793"
},
{
"name": "HTML",
"bytes": "135800"
},
{
"name": "Java",
"bytes": "47446"
},
{
"name": "JavaScript",
"bytes": "14704"
},
{
"name": "Jupyter Notebook",
"bytes": "5373459"
},
{
"name": "Python",
"bytes": "166227"
},
{
"name": "QMake",
"bytes": "16168"
},
{
"name": "XSLT",
"bytes": "7770"
}
],
"symlink_target": ""
} |
"""The security groups extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack.compute.schemas import security_groups as \
schema_security_groups
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.compute import exception
from jacket.i18n import _
from jacket.compute.network.security_group import openstack_driver
from jacket.compute.virt import netutils
LOG = logging.getLogger(__name__)
ALIAS = 'os-security-groups'
ATTRIBUTE_NAME = 'security_groups'
authorize = extensions.os_compute_authorizer(ALIAS)
softauth = extensions.os_compute_soft_authorizer(ALIAS)
def _authorize_context(req):
    """Extract the request context and enforce the security-groups policy."""
    ctx = req.environ['compute.context']
    authorize(ctx)
    return ctx
class SecurityGroupControllerBase(wsgi.Controller):
    """Base class for Security Group controllers."""
    def __init__(self):
        # Resolve whichever security-group backend is configured
        # (nova-network vs neutron); policy checks happen in this API
        # layer, hence skip_policy_check on the lower-level APIs.
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver(
                skip_policy_check=True))
        self.compute_api = cloud.API(
            security_group_api=self.security_group_api, skip_policy_check=True)
    def _format_security_group_rule(self, context, rule, group_rule_data=None):
        """Return a security group rule in desired API response format.
        If group_rule_data is passed in that is used rather than querying
        for it.
        """
        sg_rule = {}
        sg_rule['id'] = rule['id']
        sg_rule['parent_group_id'] = rule['parent_group_id']
        sg_rule['ip_protocol'] = rule['protocol']
        sg_rule['from_port'] = rule['from_port']
        sg_rule['to_port'] = rule['to_port']
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule['group_id']:
            try:
                source_group = self.security_group_api.get(
                    context, id=rule['group_id'])
            except exception.SecurityGroupNotFound:
                # NOTE(arosen): There is a possible race condition that can
                # occur here if two api calls occur concurrently: one that
                # lists the security groups and another one that deletes a
                # security group rule that has a group_id before the
                # group_id is fetched. To handle this if
                # SecurityGroupNotFound is raised we return None instead
                # of the rule and the caller should ignore the rule.
                LOG.debug("Security Group ID %s does not exist",
                          rule['group_id'])
                return
            sg_rule['group'] = {'name': source_group.get('name'),
                                'tenant_id': source_group.get('project_id')}
        elif group_rule_data:
            sg_rule['group'] = group_rule_data
        else:
            # Rule targets a CIDR rather than another security group.
            sg_rule['ip_range'] = {'cidr': rule['cidr']}
        return sg_rule
    def _format_security_group(self, context, group):
        """Map a security group dict onto the API response structure.

        Rules that fail to format (source group vanished in a race — see
        _format_security_group_rule) are silently dropped.
        """
        security_group = {}
        security_group['id'] = group['id']
        security_group['description'] = group['description']
        security_group['name'] = group['name']
        security_group['tenant_id'] = group['project_id']
        security_group['rules'] = []
        for rule in group['rules']:
            formatted_rule = self._format_security_group_rule(context, rule)
            if formatted_rule:
                security_group['rules'] += [formatted_rule]
        return security_group
    def _from_body(self, body, key):
        """Extract *key* from a request body, raising HTTP 400 when the
        body is empty or the key is missing."""
        if not body:
            raise exc.HTTPBadRequest(
                explanation=_("The request body can't be empty"))
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPBadRequest(
                explanation=_("Missing parameter %s") % key)
        return value
class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""
    @extensions.expected_errors((400, 404))
    def show(self, req, id):
        """Return data about the given security group."""
        context = _authorize_context(req)
        try:
            # validate_id raises Invalid for malformed ids (-> 400);
            # get raises SecurityGroupNotFound for unknown ids (-> 404).
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        return {'security_group': self._format_security_group(context,
                                                              security_group)}
    @extensions.expected_errors((400, 404))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete a security group."""
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
            self.security_group_api.destroy(context, security_group)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
    @extensions.expected_errors(404)
    def index(self, req):
        """Returns a list of security groups."""
        context = _authorize_context(req)
        search_opts = {}
        search_opts.update(req.GET)
        project_id = context.project_id
        raw_groups = self.security_group_api.list(context,
                                                  project=project_id,
                                                  search_opts=search_opts)
        # Apply any request-supplied pagination limits.
        limited_list = common.limited(raw_groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]
        # Stable ordering: by tenant first, then group name.
        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}
    @extensions.expected_errors((400, 403))
    def create(self, req, body):
        """Creates a new security group."""
        context = _authorize_context(req)
        security_group = self._from_body(body, 'security_group')
        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)
        try:
            self.security_group_api.validate_property(group_name, 'name', None)
            self.security_group_api.validate_property(group_description,
                                                      'description', None)
            group_ref = self.security_group_api.create_security_group(
                context, group_name, group_description)
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupLimitExceeded as exp:
            # Quota exhausted -> 403.
            raise exc.HTTPForbidden(explanation=exp.format_message())
        return {'security_group': self._format_security_group(context,
                                                              group_ref)}
    @extensions.expected_errors((400, 404))
    def update(self, req, id, body):
        """Update a security group."""
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        security_group_data = self._from_body(body, 'security_group')
        group_name = security_group_data.get('name', None)
        group_description = security_group_data.get('description', None)
        try:
            self.security_group_api.validate_property(group_name, 'name', None)
            self.security_group_api.validate_property(group_description,
                                                      'description', None)
            group_ref = self.security_group_api.update_security_group(
                context, security_group, group_name, group_description)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        return {'security_group': self._format_security_group(context,
                                                              group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
    # CRUD for individual security group rules.
    @extensions.expected_errors((400, 403, 404))
    def create(self, req, body):
        """Create a rule inside an existing group and return it formatted."""
        context = _authorize_context(req)
        sg_rule = self._from_body(body, 'security_group_rule')
        try:
            parent_group_id = self.security_group_api.validate_id(
                sg_rule.get('parent_group_id'))
            security_group = self.security_group_api.get(context, None,
                                                         parent_group_id,
                                                         map_exception=True)
            new_rule = self._rule_args_to_dict(context,
                to_port=sg_rule.get('to_port'),
                from_port=sg_rule.get('from_port'),
                ip_protocol=sg_rule.get('ip_protocol'),
                cidr=sg_rule.get('cidr'),
                group_id=sg_rule.get('group_id'))
        except (exception.Invalid, exception.InvalidCidr) as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        if new_rule is None:
            msg = _("Not enough parameters to build a valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)
        new_rule['parent_group_id'] = security_group['id']
        if 'cidr' in new_rule:
            # Reject a /0 prefix unless the network really is "everything"
            # (0.0.0.0/0 or ::/0).
            net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
            if net not in ('0.0.0.0', '::') and prefixlen == '0':
                msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
                raise exc.HTTPBadRequest(explanation=msg)
        group_rule_data = None
        try:
            if sg_rule.get('group_id'):
                # Pre-fetch the source group so the formatter below does
                # not have to query for it again.
                source_group = self.security_group_api.get(
                    context, id=sg_rule['group_id'])
                group_rule_data = {'name': source_group.get('name'),
                                   'tenant_id': source_group.get('project_id')}
            security_group_rule = (
                self.security_group_api.create_security_group_rule(
                    context, security_group, new_rule))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.SecurityGroupLimitExceeded as exp:
            raise exc.HTTPForbidden(explanation=exp.format_message())
        formatted_rule = self._format_security_group_rule(context,
                                                          security_group_rule,
                                                          group_rule_data)
        return {"security_group_rule": formatted_rule}
    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None, group_id=None):
        """Build an ingress rule dict from request arguments.

        A group-targeted rule takes precedence over a CIDR rule when both
        group_id and cidr are supplied.
        """
        if group_id is not None:
            group_id = self.security_group_api.validate_id(group_id)
            # check if groupId exists
            self.security_group_api.get(context, id=group_id)
            return self.security_group_api.new_group_ingress_rule(
                group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr)
            return self.security_group_api.new_cidr_ingress_rule(
                cidr, ip_protocol, from_port, to_port)
    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete the security group rule with the given id."""
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            rule = self.security_group_api.get_rule(context, id)
            group_id = rule['parent_group_id']
            security_group = self.security_group_api.get(context, None,
                                                         group_id,
                                                         map_exception=True)
            self.security_group_api.remove_rules(context, security_group,
                                                 [rule['id']])
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
class ServerSecurityGroupController(SecurityGroupControllerBase):
    # Read-only, per-server view of security groups.
    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = _authorize_context(req)
        # Make sure the tenant's default group exists before listing.
        self.security_group_api.ensure_default(context)
        try:
            instance = common.get_instance(self.compute_api, context,
                                           server_id)
            groups = self.security_group_api.get_instance_security_groups(
                context, instance, True)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            msg = exp.format_message()
            raise exc.HTTPNotFound(explanation=msg)
        result = [self._format_security_group(context, group)
                  for group in groups]
        # Stable ordering: by tenant first, then group name.
        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
    """Server actions that add/remove a named security group on an instance."""
    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver(
                skip_policy_check=True))
        self.compute_api = cloud.API(
            security_group_api=self.security_group_api, skip_policy_check=True)
    def _parse(self, body, action):
        """Pull the security group name out of an action body.

        Raises HTTP 400 for a missing dict, missing name, or empty name.
        """
        try:
            body = body[action]
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise exc.HTTPBadRequest(explanation=msg)
        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise exc.HTTPBadRequest(explanation=msg)
        return group_name
    def _invoke(self, method, context, id, group_name):
        """Look up the instance by id and apply *method* to it."""
        instance = common.get_instance(self.compute_api, context, id)
        method(context, instance, group_name)
    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['compute.context']
        authorize(context)
        group_name = self._parse(body, 'addSecurityGroup')
        try:
            return self._invoke(self.security_group_api.add_to_instance,
                                context, id, group_name)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except (exception.SecurityGroupCannotBeApplied,
                exception.SecurityGroupExistsForInstance) as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['compute.context']
        authorize(context)
        group_name = self._parse(body, 'removeSecurityGroup')
        try:
            return self._invoke(self.security_group_api.remove_from_instance,
                                context, id, group_name)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except exception.SecurityGroupNotExistsForInstance as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
class SecurityGroupsOutputController(wsgi.Controller):
    """Injects 'security_groups' into server show/create/detail responses."""
    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = cloud.API(skip_policy_check=True)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver(
                skip_policy_check=True))
    def _extend_servers(self, req, servers):
        """Attach each server's security group names to the response dicts."""
        # TODO(arosen) this function should be refactored to reduce duplicate
        # code and use get_instance_security_groups instead of get_db_instance.
        if not len(servers):
            return
        key = "security_groups"
        context = req.environ['compute.context']
        # Soft policy check: silently omit the attribute when unauthorized.
        if not softauth(context):
            return
        if not openstack_driver.is_neutron_security_groups():
            # nova-network backend: groups are already on the DB instance.
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[ATTRIBUTE_NAME] = [{"name": group["name"]}
                                              for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # neutron security groups the requested security groups for the
            # instance are not in the db and have not been sent to neutron yet.
            if req.method != 'POST':
                sg_instance_bindings = (
                    self.security_group_api
                    .get_instances_security_groups_bindings(context,
                                                            servers))
                for server in servers:
                    groups = sg_instance_bindings.get(server['id'])
                    if groups:
                        server[ATTRIBUTE_NAME] = groups
            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                # try converting to json
                req_obj = jsonutils.loads(req.body)
                # Add security group to server, if no security group was in
                # request add default since that is the group it is part of
                servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
                    ATTRIBUTE_NAME, [{'name': 'default'}])
    def _show(self, req, resp_obj):
        # Single-server responses wrap the server dict under 'server'.
        if 'server' in resp_obj.obj:
            self._extend_servers(req, [resp_obj.obj['server']])
    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)
    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)
    @wsgi.extends
    def detail(self, req, resp_obj):
        self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroups(extensions.V21APIExtensionBase):
    """Security group support."""
    name = "SecurityGroups"
    alias = ALIAS
    version = 1
    def get_controller_extensions(self):
        """Register the response-output and server-action extensions."""
        secgrp_output_ext = extensions.ControllerExtension(
            self, 'servers', SecurityGroupsOutputController())
        secgrp_act_ext = extensions.ControllerExtension(
            self, 'servers', SecurityGroupActionController())
        return [secgrp_output_ext, secgrp_act_ext]
    def get_resources(self):
        """Expose the group, per-server group and group-rule resources."""
        secgrp_ext = extensions.ResourceExtension(ALIAS,
                                                  SecurityGroupController())
        server_secgrp_ext = extensions.ResourceExtension(
            ALIAS,
            controller=ServerSecurityGroupController(),
            parent=dict(member_name='server', collection_name='servers'))
        secgrp_rules_ext = extensions.ResourceExtension(
            'os-security-group-rules',
            controller=SecurityGroupRulesController())
        return [secgrp_ext, server_secgrp_ext, secgrp_rules_ext]
    # NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
    # parameter as this is placed to handle scheduler_hint extension for V2.1.
    def server_create(self, server_dict, create_kwargs, body_deprecated_param):
        """Copy requested security group names into the create kwargs."""
        security_groups = server_dict.get(ATTRIBUTE_NAME)
        if security_groups is not None:
            create_kwargs['security_group'] = [
                sg['name'] for sg in security_groups if sg.get('name')]
            # De-duplicate names (order is not preserved).
            create_kwargs['security_group'] = list(
                set(create_kwargs['security_group']))
    def get_server_create_schema(self, version):
        """Return the server-create request schema for the given version."""
        if version == '2.0':
            return schema_security_groups.server_create_v20
        return schema_security_groups.server_create
| {
"content_hash": "f3995f14b0b862e4a7423e8a6fe59769",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 79,
"avg_line_length": 43.53875968992248,
"alnum_prop": 0.5838155434879373,
"repo_name": "HybridF5/jacket",
"id": "6c348ade07454cd56ade385ecf7f3d2917734001",
"size": "23140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/api/compute/openstack/compute/security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
"""Ivmech PID Controller is simple implementation of a Proportional-Integral-Derivative (PID) Controller at Python Programming Language.
More information about PID Controller: http://en.wikipedia.org/wiki/PID_controller
"""
import time
class PID:
    """Simple discrete PID controller.

    Feed measurements to :meth:`update`; the controller output is stored
    in ``self.output`` after each call.
    """

    def __init__(self, P=0.2, I=0.0, D=0.0):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        # A sample_time of 0 recomputes on every update() call.
        self.sample_time = 0.00
        self.current_time = time.time()
        self.last_time = self.current_time
        self.clear()

    def clear(self):
        """Reset setpoint, accumulated terms and output to zero."""
        self.SetPoint = 0.0
        self.PTerm = 0.0
        self.ITerm = 0.0
        self.DTerm = 0.0
        self.last_error = 0.0
        # Windup Guard
        self.int_error = 0.0
        self.windup_guard = 20.0
        self.output = 0.0

    def update(self, feedback_value):
        """Calculates PID value for given reference feedback

        .. math::
            u(t) = K_p e(t) + K_i \int_{0}^{t} e(t)dt + K_d {de}/{dt}
        """
        error = self.SetPoint - feedback_value
        self.current_time = time.time()
        delta_time = self.current_time - self.last_time
        delta_error = error - self.last_error
        # Skip recomputation until a full sample interval has elapsed.
        if delta_time < self.sample_time:
            return
        self.PTerm = self.Kp * error
        self.ITerm += error * delta_time
        # Clamp the integral term into the windup guard band.
        self.ITerm = max(-self.windup_guard,
                         min(self.windup_guard, self.ITerm))
        self.DTerm = delta_error / delta_time if delta_time > 0 else 0.0
        # Remember last time and last error for next calculation
        self.last_time = self.current_time
        self.last_error = error
        self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)

    def setKp(self, proportional_gain):
        """Set the proportional gain Kp."""
        self.Kp = proportional_gain

    def setKi(self, integral_gain):
        """Set the integral gain Ki."""
        self.Ki = integral_gain

    def setKd(self, derivative_gain):
        """Set the derivative gain Kd."""
        self.Kd = derivative_gain

    def setWindup(self, windup):
        """Set the integral windup guard.

        The accumulated integral term is clamped to [-windup, windup] so a
        large setpoint change cannot cause excessive overshoot.
        """
        self.windup_guard = windup

    def setSampleTime(self, sample_time):
        """Set the minimum interval between PID recomputations."""
        self.sample_time = sample_time
| {
"content_hash": "06acbc3cd079514cafa41daea3ab3c6a",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 136,
"avg_line_length": 33.72549019607843,
"alnum_prop": 0.6052325581395349,
"repo_name": "faturita/ShinkeyBot",
"id": "5e3dfaeb07eee845f4e8d41e08dfb0823db709f3",
"size": "4457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NeoCortex/PID.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "893244"
},
{
"name": "C++",
"bytes": "104189"
},
{
"name": "Makefile",
"bytes": "19980"
},
{
"name": "Objective-C",
"bytes": "129846"
},
{
"name": "Objective-C++",
"bytes": "354124"
},
{
"name": "Python",
"bytes": "124330"
},
{
"name": "Shell",
"bytes": "6840"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys
from subprocess import PIPE
import requests
import time
def serviceRaise(relativePath, healthUrl, serverName):
    """Launch a spring-boot service via maven and wait for it to be healthy.

    Changes the working directory to *relativePath*, starts
    ``mvn spring-boot:run`` with its output redirected to run-out.txt, then
    polls *healthUrl* until it answers 200 with a JSON body.

    Returns the Popen handle of the maven process.
    Raises TimeoutError when the health endpoint does not come up in time.
    """
    os.chdir(relativePath)
    with open('./run-out.txt', 'w') as f:
        p = subprocess.Popen('mvn spring-boot:run -P dev-standalone',
                             shell=True, stdout=f, stderr=f)
    # Give maven a head start before polling.
    time.sleep(15)
    timeout = time.time() + 60  # 60 sec from now
    while True:
        if time.time() > timeout:
            raise TimeoutError("Cannot connect to config on url: " + healthUrl)
        time.sleep(3)
        print(serverName + " Request performing")
        try:
            r = requests.get(healthUrl)
        except requests.exceptions.ConnectionError:
            # Service not listening yet; keep polling.
            continue
        if r.status_code != 200:
            continue
        try:
            # Parse JSON only after a 200: error pages may not be JSON and
            # would otherwise raise an uncaught ValueError here.
            data = r.json()
        except ValueError:
            continue
        print(serverName + " OK")
        print(data)
        break
    return p
# --- script entry: choose which service to raise from argv[1] ---
serviceName = sys.argv[1] if len(sys.argv) > 1 else ""
# Cap maven's heap so several spring-boot builds can run side by side.
os.environ["MAVEN_OPTS"] = "-Xmx128M"
if serviceName == "":
    # Fail fast with an actionable message instead of an IndexError or a
    # placeholder error text.
    raise ValueError("missing service name: usage run-service.py <service>")
if serviceName == "ui":
    ui = serviceRaise("../sqap-ui", "http://localhost:8080/health", "UI")
# config = serviceRaise("../sqap-config", "http://localhost:8888/health", "Config")
# #TODO async from here
# discovery = serviceRaise("../sqap-discovery", "http://localhost:8081/health", "Discovery")
# gateway = serviceRaise("../sqap-gateway", "http://localhost:8090/health", "Gateway")
# auth = serviceRaise("../sqap-auth", "http://localhost:8083/health", "Auth")
# #admin = serviceRaise("../sqap-admin", "http://localhost:8884/health", "Admin")
# ui = serviceRaise("../sqap-ui", "http://localhost:8080/health", "UI")
#TODO kill subprocesses on quit
| {
"content_hash": "f6ac86c2567b30cd541cc3c341d02be3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 97,
"avg_line_length": 30.865384615384617,
"alnum_prop": 0.6623052959501557,
"repo_name": "MarcinMilewski/sqap",
"id": "16186ef3ef4b087e9fa1bc682f18e2938cbf95a3",
"size": "1627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/run-service.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "4400"
},
{
"name": "Groovy",
"bytes": "12852"
},
{
"name": "HTML",
"bytes": "87909"
},
{
"name": "Java",
"bytes": "167325"
},
{
"name": "JavaScript",
"bytes": "83811"
},
{
"name": "PLpgSQL",
"bytes": "328"
},
{
"name": "Python",
"bytes": "3282"
},
{
"name": "Shell",
"bytes": "66"
},
{
"name": "TypeScript",
"bytes": "140323"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Render the home page template with its title."""
    return render(request, 'index.html', {'title': 'Home Page'})
| {
"content_hash": "027d2b5774734cccbd54ee7bae281133",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 49,
"avg_line_length": 26,
"alnum_prop": 0.7307692307692307,
"repo_name": "Rut0/RutoApp",
"id": "faf20550d225b49922f141f9edc1bc1d6526412a",
"size": "182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruto/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "616"
},
{
"name": "HTML",
"bytes": "1602"
},
{
"name": "Python",
"bytes": "9271"
}
],
"symlink_target": ""
} |
"""Tests for distutils.sysconfig."""
import contextlib
import os
import subprocess
import sys
import pathlib
import pytest
import jaraco.envs
import path
from jaraco.text import trim
import distutils
from distutils import sysconfig
from distutils.ccompiler import get_default_compiler # noqa: F401
from distutils.unixccompiler import UnixCCompiler
from test.support import swap_item
from . import py37compat
@pytest.mark.usefixtures('save_env')
class TestSysconfig:
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
assert os.path.isfile(config_h)
@pytest.mark.skipif("platform.system() == 'Windows'")
@pytest.mark.skipif("sys.implementation.name != 'cpython'")
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
assert os.path.isfile(makefile)
def test_get_python_lib(self, tmp_path):
assert sysconfig.get_python_lib() != sysconfig.get_python_lib(prefix=tmp_path)
def test_get_config_vars(self):
cvars = sysconfig.get_config_vars()
assert isinstance(cvars, dict)
assert cvars
@pytest.mark.skipif('sysconfig.IS_PYPY')
@pytest.mark.skipif('sysconfig.python_build')
@pytest.mark.xfail('platform.system() == "Windows"')
def test_srcdir_simple(self):
# See #15364.
srcdir = pathlib.Path(sysconfig.get_config_var('srcdir'))
assert srcdir.absolute()
assert srcdir.is_dir()
makefile = pathlib.Path(sysconfig.get_makefile_filename())
assert makefile.parent.samefile(srcdir)
    @pytest.mark.skipif('sysconfig.IS_PYPY')
    @pytest.mark.skipif('not sysconfig.python_build')
    def test_srcdir_python_build(self):
        """srcdir must point at a full source checkout when running an
        uninstalled build."""
        # See #15364.
        srcdir = pathlib.Path(sysconfig.get_config_var('srcdir'))
        # The python executable has not been installed so srcdir
        # should be a full source checkout.
        Python_h = srcdir.joinpath('Include', 'Python.h')
        assert Python_h.is_file()
        # Both Path and str arguments are accepted.
        assert sysconfig._is_python_source_dir(srcdir)
        assert sysconfig._is_python_source_dir(str(srcdir))
def test_srcdir_independent_of_cwd(self):
"""
srcdir should be independent of the current working directory
"""
# See #15364.
srcdir = sysconfig.get_config_var('srcdir')
with path.Path('..'):
srcdir2 = sysconfig.get_config_var('srcdir')
assert srcdir == srcdir2
    def customize_compiler(self):
        """Run sysconfig.customize_compiler against a stub unix compiler.

        Returns the stub so callers can inspect which executables were set.
        """
        # make sure AR gets caught
        class compiler:
            compiler_type = 'unix'
            executables = UnixCCompiler.executables
            def __init__(self):
                self.exes = {}
            def set_executables(self, **kw):
                # Record every executable assignment for later inspection.
                for k, v in kw.items():
                    self.exes[k] = v
        # Deterministic sc_* values for every config var the customization
        # reads, so the test does not depend on the build environment.
        sysconfig_vars = {
            'AR': 'sc_ar',
            'CC': 'sc_cc',
            'CXX': 'sc_cxx',
            'ARFLAGS': '--sc-arflags',
            'CFLAGS': '--sc-cflags',
            'CCSHARED': '--sc-ccshared',
            'LDSHARED': 'sc_ldshared',
            'SHLIB_SUFFIX': 'sc_shutil_suffix',
            # On macOS, disable _osx_support.customize_compiler()
            'CUSTOMIZED_OSX_COMPILER': 'True',
        }
        comp = compiler()
        with contextlib.ExitStack() as cm:
            # Swap each config var in for the duration of the call only.
            for key, value in sysconfig_vars.items():
                cm.enter_context(swap_item(sysconfig._config_vars, key, value))
            sysconfig.customize_compiler(comp)
        return comp
    @pytest.mark.skipif("get_default_compiler() != 'unix'")
    def test_customize_compiler(self):
        """Environment variables override sysconfig values, and once the
        environment is cleared the sysconfig values win again.

        NOTE(review): this test mutates os.environ and only restores it by
        deleting keys — presumably an autouse fixture saves the environment;
        verify.
        """
        # Make sure that sysconfig._config_vars is initialized
        sysconfig.get_config_vars()

        # First pass: the environment overrides the patched sysconfig vars.
        os.environ['AR'] = 'env_ar'
        os.environ['CC'] = 'env_cc'
        os.environ['CPP'] = 'env_cpp'
        os.environ['CXX'] = 'env_cxx --env-cxx-flags'
        os.environ['LDSHARED'] = 'env_ldshared'
        os.environ['LDFLAGS'] = '--env-ldflags'
        os.environ['ARFLAGS'] = '--env-arflags'
        os.environ['CFLAGS'] = '--env-cflags'
        os.environ['CPPFLAGS'] = '--env-cppflags'
        os.environ['RANLIB'] = 'env_ranlib'
        comp = self.customize_compiler()
        assert comp.exes['archiver'] == 'env_ar --env-arflags'
        assert comp.exes['preprocessor'] == 'env_cpp --env-cppflags'
        assert comp.exes['compiler'] == 'env_cc --sc-cflags --env-cflags --env-cppflags'
        assert comp.exes['compiler_so'] == (
            'env_cc --sc-cflags ' '--env-cflags ' '--env-cppflags --sc-ccshared'
        )
        assert comp.exes['compiler_cxx'] == 'env_cxx --env-cxx-flags'
        assert comp.exes['linker_exe'] == 'env_cc'
        assert comp.exes['linker_so'] == (
            'env_ldshared --env-ldflags --env-cflags' ' --env-cppflags'
        )
        assert comp.shared_lib_extension == 'sc_shutil_suffix'

        # RANLIB is only consulted on macOS.
        if sys.platform == "darwin":
            assert comp.exes['ranlib'] == 'env_ranlib'
        else:
            assert 'ranlib' not in comp.exes

        # Second pass: with the environment cleared, the sentinel
        # sysconfig values take effect.
        del os.environ['AR']
        del os.environ['CC']
        del os.environ['CPP']
        del os.environ['CXX']
        del os.environ['LDSHARED']
        del os.environ['LDFLAGS']
        del os.environ['ARFLAGS']
        del os.environ['CFLAGS']
        del os.environ['CPPFLAGS']
        del os.environ['RANLIB']
        comp = self.customize_compiler()
        assert comp.exes['archiver'] == 'sc_ar --sc-arflags'
        assert comp.exes['preprocessor'] == 'sc_cc -E'
        assert comp.exes['compiler'] == 'sc_cc --sc-cflags'
        assert comp.exes['compiler_so'] == 'sc_cc --sc-cflags --sc-ccshared'
        assert comp.exes['compiler_cxx'] == 'sc_cxx'
        assert comp.exes['linker_exe'] == 'sc_cc'
        assert comp.exes['linker_so'] == 'sc_ldshared'
        assert comp.shared_lib_extension == 'sc_shutil_suffix'
        assert 'ranlib' not in comp.exes
def test_parse_makefile_base(self, tmp_path):
makefile = tmp_path / 'Makefile'
makefile.write_text(
trim(
"""
CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'
VAR=$OTHER
OTHER=foo
"""
)
)
d = sysconfig.parse_makefile(makefile)
assert d == {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'", 'OTHER': 'foo'}
def test_parse_makefile_literal_dollar(self, tmp_path):
makefile = tmp_path / 'Makefile'
makefile.write_text(
trim(
"""
CONFIG_ARGS= '--arg1=optarg1' 'ENV=\\$$LIB'
VAR=$OTHER
OTHER=foo
"""
)
)
d = sysconfig.parse_makefile(makefile)
assert d == {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'", 'OTHER': 'foo'}
def test_sysconfig_module(self):
import sysconfig as global_sysconfig
assert global_sysconfig.get_config_var('CFLAGS') == sysconfig.get_config_var(
'CFLAGS'
)
assert global_sysconfig.get_config_var('LDFLAGS') == sysconfig.get_config_var(
'LDFLAGS'
)
    @pytest.mark.skipif("sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER')")
    def test_sysconfig_compiler_vars(self):
        """Compiler-related vars match between the two sysconfig modules,
        unless macOS compiler customization has already diverged them.
        """
        # On OS X, binary installers support extension module building on
        # various levels of the operating system with differing Xcode
        # configurations.  This requires customization of some of the
        # compiler configuration directives to suit the environment on
        # the installed machine.  Some of these customizations may require
        # running external programs and, so, are deferred until needed by
        # the first extension module build.  With Python 3.3, only
        # the Distutils version of sysconfig is used for extension module
        # builds, which happens earlier in the Distutils tests.  This may
        # cause the following tests to fail since no tests have caused
        # the global version of sysconfig to call the customization yet.
        # The solution for now is to simply skip this test in this case.
        # The longer-term solution is to only have one version of sysconfig.
        import sysconfig as global_sysconfig

        # Runtime re-check in addition to the skipif marker: the var may
        # have been customized by an earlier test in this session.
        if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'):
            pytest.skip('compiler flags customized')
        assert global_sysconfig.get_config_var('LDSHARED') == sysconfig.get_config_var(
            'LDSHARED'
        )
        assert global_sysconfig.get_config_var('CC') == sysconfig.get_config_var('CC')
@pytest.mark.skipif("not sysconfig.get_config_var('EXT_SUFFIX')")
def test_SO_deprecation(self):
with pytest.warns(DeprecationWarning):
sysconfig.get_config_var('SO')
    def test_customize_compiler_before_get_config_vars(self, tmp_path):
        # Issue #21923: test that a Distribution compiler
        # instance can be called without an explicit call to
        # get_config_vars().
        # The scenario runs in a subprocess so that this interpreter's
        # already-initialized sysconfig state cannot mask the bug.
        file = tmp_path / 'file'
        file.write_text(
            trim(
                """
                from distutils.core import Distribution
                config = Distribution().get_command_obj('config')
                # try_compile may pass or it may fail if no compiler
                # is found but it should not raise an exception.
                rc = config.try_compile('int x;')
                """
            )
        )
        p = subprocess.Popen(
            py37compat.subprocess_args(sys.executable, file),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # interleave stderr into stdout for the message
            universal_newlines=True,
        )
        outs, errs = p.communicate()
        assert 0 == p.returncode, "Subprocess failed: " + outs
def test_parse_config_h(self):
config_h = sysconfig.get_config_h_filename()
input = {}
with open(config_h, encoding="utf-8") as f:
result = sysconfig.parse_config_h(f, g=input)
assert input is result
with open(config_h, encoding="utf-8") as f:
result = sysconfig.parse_config_h(f)
assert isinstance(result, dict)
@pytest.mark.skipif("platform.system() != 'Windows'")
@pytest.mark.skipif("sys.implementation.name != 'cpython'")
def test_win_ext_suffix(self):
assert sysconfig.get_config_var("EXT_SUFFIX").endswith(".pyd")
assert sysconfig.get_config_var("EXT_SUFFIX") != ".pyd"
@pytest.mark.skipif("platform.system() != 'Windows'")
@pytest.mark.skipif("sys.implementation.name != 'cpython'")
@pytest.mark.skipif(
'\\PCbuild\\'.casefold() not in sys.executable.casefold(),
reason='Need sys.executable to be in a source tree',
)
def test_win_build_venv_from_source_tree(self, tmp_path):
"""Ensure distutils.sysconfig detects venvs from source tree builds."""
env = jaraco.envs.VEnv()
env.create_opts = env.clean_opts
env.root = tmp_path
env.ensure_env()
cmd = [
env.exe(),
"-c",
"import distutils.sysconfig; print(distutils.sysconfig.python_build)",
]
distutils_path = os.path.dirname(os.path.dirname(distutils.__file__))
out = subprocess.check_output(
cmd, env={**os.environ, "PYTHONPATH": distutils_path}
)
assert out == "True"
| {
"content_hash": "4f72e2b5ee2c707c0d685e77c79cdd70",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 88,
"avg_line_length": 38.070234113712374,
"alnum_prop": 0.5922867433892647,
"repo_name": "pypa/setuptools",
"id": "66f92c2ae00e855deea818411334b99b077bc84e",
"size": "11383",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setuptools/_distutils/tests/test_sysconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2173"
},
{
"name": "C",
"bytes": "36107"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Python",
"bytes": "4027592"
}
],
"symlink_target": ""
} |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import agent
from google.cloud.dialogflowcx_v3beta1.types import agent as gcdc_agent
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import AgentsTransport, DEFAULT_CLIENT_INFO
class AgentsGrpcTransport(AgentsTransport):
"""gRPC backend transport for Agents.
Service for managing
[Agents][google.cloud.dialogflow.cx.v3beta1.Agent].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        # Lazily created by the ``operations_client`` property.
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            # No channel given: resolve which SSL credentials to use.
            # The deprecated api_mtls_endpoint path takes precedence.
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        # Delegate to grpc_helpers, supplying the service defaults; any
        # extra kwargs (e.g. ssl_credentials, options) pass straight through.
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_agents(
self,
) -> Callable[[agent.ListAgentsRequest], agent.ListAgentsResponse]:
r"""Return a callable for the list agents method over gRPC.
Returns the list of all agents in the specified
location.
Returns:
Callable[[~.ListAgentsRequest],
~.ListAgentsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_agents" not in self._stubs:
self._stubs["list_agents"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/ListAgents",
request_serializer=agent.ListAgentsRequest.serialize,
response_deserializer=agent.ListAgentsResponse.deserialize,
)
return self._stubs["list_agents"]
@property
def get_agent(self) -> Callable[[agent.GetAgentRequest], agent.Agent]:
r"""Return a callable for the get agent method over gRPC.
Retrieves the specified agent.
Returns:
Callable[[~.GetAgentRequest],
~.Agent]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_agent" not in self._stubs:
self._stubs["get_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/GetAgent",
request_serializer=agent.GetAgentRequest.serialize,
response_deserializer=agent.Agent.deserialize,
)
return self._stubs["get_agent"]
@property
def create_agent(
self,
) -> Callable[[gcdc_agent.CreateAgentRequest], gcdc_agent.Agent]:
r"""Return a callable for the create agent method over gRPC.
Creates an agent in the specified location.
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Returns:
Callable[[~.CreateAgentRequest],
~.Agent]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_agent" not in self._stubs:
self._stubs["create_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/CreateAgent",
request_serializer=gcdc_agent.CreateAgentRequest.serialize,
response_deserializer=gcdc_agent.Agent.deserialize,
)
return self._stubs["create_agent"]
@property
def update_agent(
self,
) -> Callable[[gcdc_agent.UpdateAgentRequest], gcdc_agent.Agent]:
r"""Return a callable for the update agent method over gRPC.
Updates the specified agent.
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Returns:
Callable[[~.UpdateAgentRequest],
~.Agent]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_agent" not in self._stubs:
self._stubs["update_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/UpdateAgent",
request_serializer=gcdc_agent.UpdateAgentRequest.serialize,
response_deserializer=gcdc_agent.Agent.deserialize,
)
return self._stubs["update_agent"]
@property
def delete_agent(self) -> Callable[[agent.DeleteAgentRequest], empty_pb2.Empty]:
r"""Return a callable for the delete agent method over gRPC.
Deletes the specified agent.
Returns:
Callable[[~.DeleteAgentRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_agent" not in self._stubs:
self._stubs["delete_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/DeleteAgent",
request_serializer=agent.DeleteAgentRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_agent"]
@property
def export_agent(
self,
) -> Callable[[agent.ExportAgentRequest], operations_pb2.Operation]:
r"""Return a callable for the export agent method over gRPC.
Exports the specified agent to a binary file.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``:
[ExportAgentResponse][google.cloud.dialogflow.cx.v3beta1.ExportAgentResponse]
Returns:
Callable[[~.ExportAgentRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_agent" not in self._stubs:
self._stubs["export_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/ExportAgent",
request_serializer=agent.ExportAgentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_agent"]
@property
def restore_agent(
self,
) -> Callable[[agent.RestoreAgentRequest], operations_pb2.Operation]:
r"""Return a callable for the restore agent method over gRPC.
Restores the specified agent from a binary file.
Replaces the current agent with a new one. Note that all
existing resources in agent (e.g. intents, entity types, flows)
will be removed.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``: An `Empty
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`__
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Returns:
Callable[[~.RestoreAgentRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_agent" not in self._stubs:
self._stubs["restore_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/RestoreAgent",
request_serializer=agent.RestoreAgentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_agent"]
@property
def validate_agent(
self,
) -> Callable[[agent.ValidateAgentRequest], agent.AgentValidationResult]:
r"""Return a callable for the validate agent method over gRPC.
Validates the specified agent and creates or updates
validation results. The agent in draft version is
validated. Please call this API after the training is
completed to get the complete validation results.
Returns:
Callable[[~.ValidateAgentRequest],
~.AgentValidationResult]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "validate_agent" not in self._stubs:
self._stubs["validate_agent"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/ValidateAgent",
request_serializer=agent.ValidateAgentRequest.serialize,
response_deserializer=agent.AgentValidationResult.deserialize,
)
return self._stubs["validate_agent"]
@property
def get_agent_validation_result(
self,
) -> Callable[[agent.GetAgentValidationResultRequest], agent.AgentValidationResult]:
r"""Return a callable for the get agent validation result method over gRPC.
Gets the latest agent validation result. Agent
validation is performed when ValidateAgent is called.
Returns:
Callable[[~.GetAgentValidationResultRequest],
~.AgentValidationResult]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_agent_validation_result" not in self._stubs:
self._stubs["get_agent_validation_result"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Agents/GetAgentValidationResult",
request_serializer=agent.GetAgentValidationResultRequest.serialize,
response_deserializer=agent.AgentValidationResult.deserialize,
)
return self._stubs["get_agent_validation_result"]
    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
@property
def cancel_operation(
self,
) -> Callable[[operations_pb2.CancelOperationRequest], None]:
r"""Return a callable for the cancel_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["cancel_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
    @property
    def kind(self) -> str:
        """Identifier for this transport flavor ("grpc")."""
        return "grpc"
__all__ = ("AgentsGrpcTransport",)
| {
"content_hash": "9d3d963eb3e292c17b549dee29c152a9",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 108,
"avg_line_length": 44.166392092257,
"alnum_prop": 0.6222164198590026,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "65d597d22d6eefaed14872f0b2ba9aff4ec2b011",
"size": "27409",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dialogflowcx_v3beta1/services/agents/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
} |
'''Database module, including the SQLAlchemy database object and DB-related
mixins.
'''
from .extensions import db
class CRUDMixin(object):
    """Mixin that adds convenience methods for CRUD (create, read, update, delete)
    operations.
    """
    # Allow redefinition of the table when the module is imported twice
    # (e.g. under test runners).
    __table_args__ = {'extend_existing': True}

    # Surrogate integer primary key shared by every model using this mixin.
    id = db.Column(db.Integer, primary_key=True)

    @classmethod
    def get_by_id(cls, id):
        '''Fetch a record by primary key.

        Accepts ints, floats, or digit-only strings; any other value
        returns None instead of raising.

        NOTE(review): ``basestring`` here (and ``iteritems`` below) are
        Python 2 only — this module will raise NameError/AttributeError
        on Python 3. Confirm the targeted interpreter.
        '''
        if any(
            (isinstance(id, basestring) and id.isdigit(),
             isinstance(id, (int, float))),
        ):
            return cls.query.get(int(id))
        return None

    @classmethod
    def create(cls, **kwargs):
        '''Create a new record and save it the database.'''
        instance = cls(**kwargs)
        return instance.save()

    def update(self, commit=True, **kwargs):
        '''Update specific fields of a record.'''
        for attr, value in kwargs.iteritems():
            setattr(self, attr, value)
        # Returns the saved instance when committing, otherwise self.
        return commit and self.save() or self

    def save(self, commit=True):
        '''Save the record.'''
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        '''Remove the record from the database.'''
        db.session.delete(self)
        # Evaluates to commit()'s result (None) when committing, else False.
        return commit and db.session.commit()
"content_hash": "817bdc4c6a7311975ae28f9e9f320854",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 28.127659574468087,
"alnum_prop": 0.5885022692889561,
"repo_name": "Nikola-K/fpage",
"id": "3578531cd88b6cd5aa3ba8550cf772d0fdddc6bb",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fpage/database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "207326"
},
{
"name": "JavaScript",
"bytes": "250318"
},
{
"name": "Python",
"bytes": "47445"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
from sidomo import Container
def say_hello(to):
    """Yield the output lines of `echo hello <to>` run inside an ubuntu container."""
    with Container('ubuntu', stderr=False) as container:
        yield from container.run('echo hello %s' % to)
if __name__ == '__main__':
    # Demo entry point: stream the container's greeting line by line.
    for line in say_hello("world"):
        print(line)
| {
"content_hash": "9b4ce9c564b1698ec21b1c8609f51029",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 35,
"avg_line_length": 17.944444444444443,
"alnum_prop": 0.47987616099071206,
"repo_name": "suchkultur/sidomotest",
"id": "e54a87da0d63956a39d2ca5e73b636acac0e4579",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ubuntutest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2060"
}
],
"symlink_target": ""
} |
import logging
import os
import sys
import cv2
import numpy as np
# Configure logging once at import time; helpers below use this logger.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

# Source character images live under <this module's dir>/dataset/src.
_MYDIR = os.path.realpath(os.path.dirname(__file__))
DATADIR = os.path.join(_MYDIR, "dataset")

# Side length (pixels) of the square sliding window used by the
# window-scanning helpers below.
WINDOW_SIDE = 50
WINDOW_SHAPE = np.array((WINDOW_SIDE, WINDOW_SIDE))
def clean_image(rgba_image):
    """Takes alpha channel of the given image and strips empty borders.

    Args:
        rgba_image: 4-channel image numpy array.

    Returns:
        2D numpy array of the image alpha channel with empty borders stripped.
    """
    alpha = rgba_image[:, :, 3]  # Just alpha-channel
    filled_rows = np.flatnonzero(alpha.any(axis=1))
    filled_cols = np.flatnonzero(alpha.any(axis=0))
    if filled_rows.size == 0:
        # Fully transparent image: nothing to keep.
        return alpha[:0, :0].copy()
    # Crop to the bounding box of non-zero pixels.  The previous
    # implementation deleted *every* all-zero row/column — including
    # interior gaps, which squashed glyphs like '=' — and did so with a
    # quadratic delete loop; this keeps interior structure intact, as the
    # docstring promises ("strips empty borders").
    return alpha[filled_rows[0]:filled_rows[-1] + 1,
                 filled_cols[0]:filled_cols[-1] + 1].copy()
def get_dataset():
    """Load the source character dataset.

    Returns:
        A dict mapping labels (characters) to their cleaned 2D alpha images.
    """
    dataset = {}
    src_dir = os.path.join(DATADIR, 'src')
    for filename in os.listdir(src_dir):
        label, extension = os.path.splitext(filename)
        if extension == '.png':
            # -1 keeps the alpha channel when reading.
            raw = cv2.imread(os.path.join(src_dir, filename), -1)
            dataset[label] = clean_image(raw)
    return dataset
def to_slice(start, size):
    """Build an index tuple of slices for the given window.

    Args:
        start: tuple of slice start coordinates (one per axis).
        size: matching tuple of window extents.

    Returns:
        A tuple of slice objects covering [start, start + size) per axis.
    """
    slices = []
    for begin, extent in zip(start, size):
        slices.append(slice(begin, begin + extent))
    return tuple(slices)
def pad_image(image, padding_size):
    """Return a copy of a 2D image surrounded by a zero border.

    Args:
        image: 2D numpy array instance.
        padding_size: border size (pixels) of padding to add on every side.

    Returns:
        A padded 2D image of the same dtype.
    """
    rows, cols = image.shape
    padded = np.zeros((rows + 2 * padding_size, cols + 2 * padding_size),
                      dtype=image.dtype)
    padded[padding_size:padding_size + rows,
           padding_size:padding_size + cols] = image
    return padded
def _all_window_coords(image, window_shape=WINDOW_SHAPE):
    """Generate origin coordinates for windows of a given size inside an image.

    NOTE(review): range() here stops one short of the last valid window
    origin (shape - window_shape); confirm the off-by-one is intentional.

    Args:
        image: a 2D numpy array image.
        window_shape: 2-tuple of the window size to assume.

    Yields:
        2-tuples of (row, col) window origins.
    """
    row_limit = image.shape[0] - window_shape[0]
    col_limit = image.shape[1] - window_shape[1]
    for row in range(row_limit):
        for col in range(col_limit):
            yield (row, col)
def _non_empty_windows(image, window_shape=WINDOW_SHAPE):
    """Yield the sufficiently-filled submatrices of a given size from an image.

    Windows with at most 1% non-zero pixels are skipped (a warning is
    logged once per call).

    Args:
        image: source image to cut windows from.
        window_shape: 2-tuple of the window shape.

    Yields:
        2D numpy arrays of subimages of the requested size.
    """
    warned = False
    for coords in _all_window_coords(image, window_shape):
        window = image[to_slice(coords, window_shape)]
        fill_fraction = float(np.count_nonzero(window)) / window.size
        if fill_fraction > 0.01:
            yield window
        elif not warned:
            log.warning("Skipping empty window.")
            warned = True
def main(argv=None, datadir=DATADIR):
    """Generate window sub-images for each dataset label.

    Pads each source character image by half a window and writes every
    sufficiently-filled WINDOW_SIDE x WINDOW_SIDE sub-image to
    <datadir>/gen/<label>/<n>.png, replacing any previous output.

    Args:
        argv: command-line arguments; argv[1:] optionally restricts the
            labels to process.  Defaults to processing every label.
        datadir: dataset root directory containing the 'gen' subtree.
    """
    argv = argv or []  # avoid a mutable default argument
    dataset = get_dataset()
    labels_requested = argv[1:] or None
    for label, image in sorted(dataset.items()):
        if labels_requested is not None and label not in labels_requested:
            continue
        log.info("Generating data for label: %s", label)
        gen_dir = os.path.join(datadir, 'gen', label)
        if not os.path.exists(gen_dir):
            os.mkdir(gen_dir)
        else:
            # Remove previously generated windows for this label.
            cleaning = False
            for existing in os.listdir(gen_dir):
                if not cleaning:
                    log.info("Cleaning existing data for label: %s",
                             label)
                    cleaning = True
                os.unlink(os.path.join(gen_dir, existing))
        # BUG FIX: pad by an integer half-window.  WINDOW_SIDE * 0.5 is a
        # float, which breaks numpy shape construction and slicing.
        padded = pad_image(image, WINDOW_SIDE // 2)
        count = 0
        for window in _non_empty_windows(padded):
            fname = os.path.join(gen_dir, "%d.png" % count)
            cv2.imwrite(fname, window)
            count += 1
        if count == 0:
            # Consistency fix: use the module logger (was logging.error).
            log.error("No good images found for label %s", label)
            sys.exit(1)
        # BUG FIX: previously logged the last index (i), undercounting by 1.
        log.info("Wrote %d images for label: %s", count, label)
# Script entry point: optional label names may be given on the command line.
if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "6a4e9476fb0ddace15a290953d831dd2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 28.754601226993866,
"alnum_prop": 0.5818220610198421,
"repo_name": "xa4a/shredsim",
"id": "78e3cf161df00846daca2c8a9c02d4aaf5df8363",
"size": "4687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shredsim/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "766"
},
{
"name": "Python",
"bytes": "23266"
}
],
"symlink_target": ""
} |
import pyregion
import numpy as np
import pyspeckit
from astropy import table
import spectral_cube
from paths import (h2copath, mergepath, figurepath, regpath, analysispath,
mpath, hpath, tpath)
import os
from pyspeckit_fitting import simplemodel, simple_fitter, simple_fitter2
try:
from pyspeckit_fitting import h2co_radex_fitter
radexfit=True
except ImportError:
radexfit=False
from astropy.io import fits
import photutils
from astropy import coordinates
from astropy import units as u
from astropy import log
from astropy.utils.console import ProgressBar
import pylab as pl
from higal_gridded import dusttem_image, column_image
import copy
from full_cubes import cube_merge_high as cube
from noise import noise, noisehdr
# Pixels where the noise map is valid (finite).
noiseokmask = np.isfinite(noise)
# Main-beam efficiency: convert the cube and noise map to T_MB.
etamb = 0.75 # http://www.apex-telescope.org/telescope/efficiency/
cube._data /= etamb
noise /= etamb
# Per-source fit setup (component counts, velocity guesses, ...).
# NOTE(review): eval() executes arbitrary code from this file; acceptable
# for a trusted local file, but ast.literal_eval would be safer.
with open(regpath+'spectral_ncomp.txt') as f:
    pars = eval(f.read())
# Output-table column name -> pyspeckit parinfo name prefix, for each
# fitter variant (the component number is appended to the prefix).
parmap_simple = {'ampH2CO':'AMPLITUDE',
                 'ampCH3OH':'AMPCH3OH',
                 'width':'WIDTH',
                 'center':'VELOCITY',
                 'h2coratio':'RATIO',}
parmap_simple2 = {'ampH2CO':'AMPLITUDE',
                  'ampCH3OH':'AMPCH3OH',
                  'width':'WIDTH',
                  'center':'VELOCITY',
                  'h2coratio321303':'RATIO321303X',
                  'h2coratio322321':'RATIO322321X',}
# Same parameters, but for the spline-rebaselined second fit.
parmap_simple2_spline = {'spline_ampH2CO':'AMPLITUDE',
                         'spline_ampCH3OH':'AMPCH3OH',
                         'spline_width':'WIDTH',
                         'spline_center':'VELOCITY',
                         'spline_h2coratio321303':'RATIO321303X',
                         'spline_h2coratio322321':'RATIO322321X',}
#parmap_radex = {
#    'temperature':'TEMPERATURE',
#    'density':'DENSITY',
#    'column':'COLUMN',
#    'denswidth':'WIDTH',
#    'denscenter':'CENTER',}
def set_row(parinfo, ncomp, rows, parmap):
    """Copy fitted parameter values and errors from `parinfo` into table rows.

    Args:
        parinfo: mapping from '<PARNAME><component>' to objects with
            ``.value`` and ``.error`` attributes (pyspeckit parinfo-like).
        ncomp: expected number of components; must equal len(rows).
        rows: one table row per component, filled in place; each also
            gets its 'ComponentID' set.
        parmap: maps output column name -> parinfo name prefix.
    """
    assert ncomp == len(rows)
    for component, row in enumerate(rows):
        row['ComponentID'] = component
        suffix = str(component)
        for column in parmap:
            entry = parinfo[parmap[column] + suffix]
            row[column] = entry.value
            row["e" + column] = entry.error
# Annotation font size per number of fitted components (more components
# -> smaller text so the annotations still fit on the plot).
font_sizes = {1: 20,
              2: 15,
              3: 11,
              4: 8}
def fit_a_spectrum(sp, radexfit=False, write=True, vlimits=(-105,125),
                   pars=pars):
    """Fit the H2CO lines in one spectrum, twice (linear then spline baseline).

    Runs the 6-parameter 'h2co_simple' fit on the linearly-baselined
    spectrum, then sets the data to the fit residuals, refits after a
    spline re-baseline, and optionally runs the RADEX-based fitter.
    Diagnostic figures are saved along the way.

    Args:
        sp: pyspeckit Spectrum with `specname` set; modified in place.
        radexfit: also run the 'h2co_mm_radex' fitter if True.
        write: write FITS copies of the (re)baselined spectra if True.
        vlimits: allowed velocity-guess range; widened (with a warning)
            if the per-source guesses fall outside it.
        pars: per-source fit setup (ncomp, velo, optional width_max).

    Returns:
        None if the source has no components, otherwise a list
        [ncomp, parinfo_linear, parinfo_spline(, parinfo_radex)].
    """
    sp.plotter.autorefresh=False
    sp.plotter(figure=1)
    ncomp = pars[sp.specname]['ncomp']
    if ncomp == 0:
        # BUG FIX: the message used to format `ncomp` (always 0 here)
        # instead of the source name.
        log.info("Skipping {0} - no velocity components detected.".format(sp.specname))
        return
    returns = [ncomp]
    velos = pars[sp.specname]['velo']
    spname = sp.specname.replace(" ","_")
    # Line-width bounds: wide apertures (maps/boxes) may blend components.
    width_min = 1
    if 'width_max' in pars[sp.specname]:
        width_max = pars[sp.specname]['width_max']
    elif 'Map' in sp.specname or 'box' in sp.specname:
        width_max = 40
    else:
        width_max = 15
    sp.specfit.Registry.add_fitter('h2co_simple', simple_fitter2, 6,
                                   multisingle='multi')
    guesses_simple = [x for ii in range(ncomp)
                      for x in (sp.data.max(),velos[ii],5,0.5,1.0,sp.data.max())]
    if not(min(velos) > vlimits[0] and max(velos) < vlimits[1]):
        log.warn("A velocity guess {0} is outside limits {1}." .format(velos,
                                                                       vlimits))
        vlimits = (min(velos)-25, max(velos)+25)
        log.warn("Changing limits to {0}".format(vlimits))
    sp.specfit(fittype='h2co_simple', multifit=True,
               guesses=guesses_simple,
               limited=[(True,True)] * 6,
               limits=[(0,20),vlimits,(width_min,width_max),(0,1),(0.3,1.1),(0,1e5)],
              )
    # Baseline around the first fit, then refit on the baselined data.
    sp.baseline(excludefit=True, subtract=True, highlight_fitregion=True, order=1)
    sp.plotter(clear=True)
    sp.specfit(fittype='h2co_simple', multifit=True,
               guesses=guesses_simple,
               limited=[(True,True)] * 6,
               limits=[(0,20),vlimits,(width_min,width_max),(0,1),(0.3,1.1),(0,1e5)],
              )
    returns.append(copy.copy(sp.specfit.parinfo))
    err = sp.error.mean()
    sp.plotter()
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.specfit.plotresiduals(axis=sp.plotter.axis, yoffset=-err*5, clear=False,
                             color='#444444', label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin-err*5, sp.plotter.ymax)
    sp.plotter.savefig(os.path.join(figurepath,
                                    "simple/{0}_fit_4_lines_simple.pdf".format(spname)))
    if write:
        sp.write(mpath("spectra/{0}_spectrum.fits".format(spname)))
    # This will mess things up for the radexfit (maybe in a good way) but *cannot*
    # be done after the radexfit
    # Set the spectrum to be the fit residuals.  The linear baseline has
    # already been subtracted from both the data and the residuals
    linear_baseline = sp.baseline.basespec
    sp.baseline.unsubtract()
    fitted_residuals = sp.baseline.spectofit = sp.specfit.residuals
    sp.baseline.includemask[:] = True # Select ALL residuals
    sp.baseline.fit(spline=True, order=3, spline_sampling=50)
    spline_baseline = sp.baseline.basespec
    sp.data -= spline_baseline + linear_baseline
    sp.baseline.subtracted = True
    # Re-estimate the noise from a (presumably) line-free frequency range.
    sp.error[:] = sp.stats((218.5e9,218.65e9))['std']
    sp.specfit(fittype='h2co_simple', multifit=True,
               guesses=guesses_simple,
               limited=[(True,True)] * 6,
               limits=[(0,1e5),vlimits,(width_min,width_max),(0,1),(0.3,1.1),(0,1e5)],
              )
    sp.plotter()
    sp.plotter.axis.plot(sp.xarr, spline_baseline-err*5, color='orange',
                         alpha=0.75, zorder=-1, linewidth=2)
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.plotter.axis.plot(sp.xarr, fitted_residuals-err*5, color="#444444",
                         linewidth=0.5, drawstyle='steps-mid')
    #sp.specfit.plotresiduals(axis=sp.plotter.axis, yoffset=-err*5, clear=False,
    #                         color='#444444', label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin-err*5, sp.plotter.ymax)
    sp.plotter.savefig(os.path.join(figurepath,
                                    "simple/{0}_fit_4_lines_simple_splinebaselined.pdf".format(spname)))
    returns.append(copy.copy(sp.specfit.parinfo))
    if write:
        sp.write(mpath("spectra/{0}_spectrum_basesplined.fits".format(spname)))
    if radexfit:
        # Seed the RADEX fit from the simple-fit velocities and widths,
        # falling back to width=5 when the fitted width is out of bounds.
        guesses = [x for ii in range(ncomp)
                   for x in (100,14,4.5,
                             sp.specfit.parinfo['VELOCITY{0}'.format(ii)].value,
                             (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value
                              if
                              (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value
                               < width_max and
                               sp.specfit.parinfo['WIDTH{0}'.format(ii)].value
                               > width_min)
                              else 5))
                  ]
        sp.specfit.Registry.add_fitter('h2co_mm_radex', h2co_radex_fitter, 5,
                                       multisingle='multi')
        sp.specfit(fittype='h2co_mm_radex', multifit=True,
                   guesses=guesses,
                   limits=[(10,300),(11,15),(3,5.5),(-105,125),(width_min,width_max)]*ncomp,
                   limited=[(True,True)]*5*ncomp,
                   fixed=[False,False,False,True,True]*ncomp,
                   quiet=False,)
        sp.plotter.savefig(os.path.join(figurepath,
                                        "radex/{0}_fit_h2co_mm_radex.pdf".format(spname)))
        returns.append(copy.copy(sp.specfit.parinfo))
    return returns
# Use the individual spectral line cubes because they have been more
# cleanly baselined
# (unfortunately, this doesn't appar to work)
#h2co303 = pyspeckit.Cube(hpath('APEX_H2CO_303_202_bl.fits'))
#h2co322 = pyspeckit.Cube(hpath('APEX_H2CO_322_221_bl.fits'))
#h2co321 = pyspeckit.Cube(hpath('APEX_H2CO_321_220_bl.fits'))
#cube = pyspeckit.CubeStack([h2co303,h2co321,h2co322])
#cube.xarr.refX = 218222190000.0
#cube.xarr.refX_units = 'Hz'
#cube = pyspeckit.Cube(os.path.join(mergepath, 'APEX_H2CO_merge_high_sub.fits'))
#cube.cube /= etamb
#errorcube = noise[None,:,:] * np.ones(cube.cube.shape)
def load_spectra(regs, cube):
    """Average the cube over each ds9 region and return {name: Spectrum}.

    The spectrum error is the quadrature sum of the valid noise pixels in
    the aperture divided by the aperture pixel count, then replaced by the
    std of a line-free frequency window.  Duplicate region names are
    loaded once (first occurrence wins).
    """
    spectra = {}
    xarr = pyspeckit.units.SpectroscopicAxis(cube.spectral_axis.value,
                                             unit=str(cube.spectral_axis.unit),
                                             refX=cube.wcs.wcs.restfrq,
                                             refX_units='Hz')
    for reg in regs:
        name = reg.attr[1]['text']
        log.info("Loading {0}".format(name))
        if name in spectra:
            continue
        shape = pyregion.ShapeList([reg])
        mask = shape.get_mask(header=noisehdr, shape=noise.shape)
        scube = cube.subcube_from_ds9region(shape)
        data = scube.apply_numpy_function(np.nanmean, axis=(1,2))
        error = ((noise[mask & noiseokmask]**2).sum()**0.5/np.count_nonzero(mask))
        sp = pyspeckit.Spectrum(data=data,
                                error=np.ones(data.size)*error,
                                xarr=xarr, header=cube.wcs.to_header())
        sp.header['ERROR'] = error
        sp.error[:] = sp.stats((218.5e9,218.65e9))['std']
        sp.specname = name
        sp.unit = "$T_{MB}$ (K)"
        spectra[name] = sp
    return spectra
# Script: build the fitted-line-parameter table for every aperture region.
if __name__ == "__main__":
    pl.ioff()
    pl.close(1)
    pl.figure(1).clf()
    radexfit=False # not super useful...
    regs = (pyregion.open(regpath+'spectral_apertures.reg') +
            pyregion.open(regpath+'target_fields_8x8_gal.reg'))
    #regs = regs[:8]
    log.info(str({r.attr[1]['text']:r for r in regs}))
    # One output row per fitted component of each region.
    name_column = table.Column(data=[reg.attr[1]['text']
                                     for reg in regs
                                     for ii in range(pars[reg.attr[1]['text']]['ncomp'])],
                               name='Source_Name')
    comp_id_column = table.Column(data=[0]*name_column.size, name='ComponentID')
    lon_column = table.Column(data=[reg.coord_list[0]
                                    for reg in regs
                                    for ii in range(pars[reg.attr[1]['text']]['ncomp'])
                                    ],
                              name='GLON')
    lat_column = table.Column(data=[reg.coord_list[1] for reg in regs
                                    for ii in range(pars[reg.attr[1]['text']]['ncomp'])
                                    ],
                              name='GLAT')
    # Value and error ('e'-prefixed) columns for every fitted parameter.
    columns = [table.Column(name="{ee}{name}".format(name=name, ee=ee),
                            dtype='float',
                            length=name_column.size)
               for name in (parmap_simple2.keys() +# parmap_radex.keys() +
                            parmap_simple2_spline.keys())
               for ee in ['','e']
              ]
    # BUG FIX: dropped the spurious `ee=ee` keyword here; it only worked
    # because Python 2 leaks the previous comprehension's loop variable,
    # and it is a NameError under Python 3.
    columns += [table.Column(name="{name}".format(name=name),
                             dtype='float',
                             length=name_column.size)
                for name in ['boxwidth', 'boxheight', 'radius', 'area',
                             'posang'] ]
    # is the fit good enough to use for plotting?
    columns += [table.Column(data=[pars[reg.attr[1]['text']]['good']
                                   for reg in regs
                                   for ii in range(pars[reg.attr[1]['text']]['ncomp'])
                                   ],
                             name='is_good', dtype='int')]
    out_table = table.Table([name_column, comp_id_column, lon_column, lat_column] +
                            columns)
    # Mean Herschel/Hi-Gal column density and dust temperature per aperture.
    surfdens = []
    dusttem = []
    log.info("Herschel parameter extraction.")
    herschelok = np.isfinite(column_image.data) & np.isfinite(dusttem_image.data)
    for reg in ProgressBar(regs):
        mask = pyregion.ShapeList([reg]).get_mask(column_image) & herschelok
        for ii in range(pars[reg.attr[1]['text']]['ncomp']):
            surfdens.append(column_image.data[mask].mean()*1e22)
            dusttem.append(dusttem_image.data[mask].mean())
    surfdens_column = table.Column(data=surfdens, dtype='float',
                                   name='higalcolumndens')
    dusttem_column = table.Column(data=dusttem, dtype='float', name='higaldusttem')
    out_table.add_column(surfdens_column)
    out_table.add_column(dusttem_column)
    # Fit every region's spectrum and fill the corresponding table rows.
    row_number = 0
    spectra = load_spectra(regs, cube)
    for region_number,reg in enumerate(regs):
        name = reg.attr[1]['text']
        sp = spectra[name]
        log.info("Fitting {0}".format(name))
        returns = fit_a_spectrum(sp, radexfit=radexfit)
        if returns is None:
            continue
        elif radexfit:
            ncomp, pinf1, pinf2, pinf3 = returns
        else:
            ncomp, pinf1, pinf2 = returns
        if reg.name == 'box':
            out_table[row_number:row_number+ncomp]['boxwidth'] = reg.coord_list[2]
            out_table[row_number:row_number+ncomp]['boxheight'] = reg.coord_list[3]
            out_table[row_number:row_number+ncomp]['area'] = reg.coord_list[2] * reg.coord_list[3]
            out_table[row_number:row_number+ncomp]['posang'] = reg.coord_list[4]
        elif reg.name == 'circle':
            out_table[row_number:row_number+ncomp]['area'] = reg.coord_list[2]**2 * np.pi
            out_table[row_number:row_number+ncomp]['radius'] = reg.coord_list[2]
        else:
            raise ValueError("Unsupported region. Implement it if you want it.")
        set_row(pinf1, ncomp, out_table[row_number:row_number+ncomp],
                parmap=parmap_simple2)
        set_row(pinf2, ncomp, out_table[row_number:row_number+ncomp],
                parmap=parmap_simple2_spline)
        if radexfit and False: # hard-coded out the radex par mapping above
            set_row(pinf3, ncomp,
                    out_table[row_number:row_number+ncomp], parmap=parmap_radex)
        row_number = row_number + ncomp
    out_table.write(tpath("fitted_line_parameters.ipac"),
                    format='ascii.ipac')
| {
"content_hash": "8095ddbffb0d7389e05ec861bae6a7af",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 104,
"avg_line_length": 40.6398891966759,
"alnum_prop": 0.5564037897893804,
"repo_name": "adamginsburg/APEX_CMZ_H2CO",
"id": "ac8bb523fa81480985d19135df395a0f5579738d",
"size": "14671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/individual_spectra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840749"
},
{
"name": "Shell",
"bytes": "3036"
},
{
"name": "TeX",
"bytes": "133946"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Point Movie.image uploads at 'movielists/movie_images/'."""
    dependencies = [
        ('movielists', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movie',
            name='image',
            field=models.ImageField(upload_to=b'movielists/movie_images/'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "fd922cd47c71c8190002f6184951f16c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 22.526315789473685,
"alnum_prop": 0.5934579439252337,
"repo_name": "kiriakosv/movie-recommendator",
"id": "d829412099e71dc1855b7ad8410a3635df8e5be4",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviesite/movielists/migrations/0002_auto_20150409_1931.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2101"
},
{
"name": "HTML",
"bytes": "9505"
},
{
"name": "Python",
"bytes": "36726"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, with_statement
import logging
from urllib import urlencode
from mom.functional import select_dict, map_dict
from pyoauth._compat import urljoin
from pyoauth.constants import OPENID_MODE_CHECK_AUTHENTICATION, \
HEADER_CONTENT_TYPE, HTTP_POST, OAUTH_VALUE_CALLBACK_OOB, \
OAUTH_PARAM_TOKEN, OAUTH_PARAM_VERIFIER, OPENID_MODE_CHECKID_SETUP, \
OPENID_AX_MODE_FETCH_REQUEST
from pyoauth.url import url_add_query
from pyoauth.http import RequestAdapter, CONTENT_TYPE_FORM_URLENCODED
class OpenIdMixin(object):
    """
    Abstract implementation of OpenID and Attribute Exchange.
    Useful for Hybrid OAuth+OpenID auth.

    See GoogleMixin for example implementation. Use it with an
    HttpAdapterMixin class.

    http://code.google.com/apis/accounts/docs/OpenID.html
    """
    # Implement this in subclasses.
    _OPENID_ENDPOINT = None

    # Attribute Exchange type URIs (http://axschema.org/).
    ATTRIB_EMAIL = "http://axschema.org/contact/email"
    ATTRIB_COUNTRY = "http://axschema.org/contact/country/home"
    ATTRIB_LANGUAGE = "http://axschema.org/pref/language"
    ATTRIB_USERNAME = "http://axschema.org/namePerson/friendly"
    ATTRIB_FIRST_NAME = "http://axschema.org/namePerson/first"
    ATTRIB_FULL_NAME = "http://axschema.org/namePerson"
    ATTRIB_LAST_NAME = "http://axschema.org/namePerson/last"

    # OpenID 2.0 / extension namespace URIs.
    SPEC_IDENTIFIER_SELECT= "http://specs.openid.net/auth/2.0/identifier_select"
    SPEC_OPENID_NS = "http://specs.openid.net/auth/2.0"
    SPEC_OAUTH_NS = "http://specs.openid.net/extensions/oauth/1.0"
    SPEC_AX_NS = "http://openid.net/srv/ax/1.0"

    def authenticate_redirect(self, callback_uri=None, ax_attrs=None,
                              oauth_scope=None):
        """
        Redirects to the authentication URL for this service.

        After authentication, the service will redirect back to the given
        callback URI.

        We request the given attributes for the authenticated user by default
        (name, email, language, and username). If you don't need all those
        attributes for your app, you can request fewer with the ax_attrs keyword
        argument.

        :param callback_uri:
            The URL to redirect to after authentication.
        :param ax_attrs:
            List of Attribute Exchange attributes to be fetched.
        :returns:
            None
        """
        ax_attrs = ax_attrs or ("name", "email",
                                "language", "username", "country")
        callback_uri = callback_uri or self.adapter_request_path
        args = self._openid_args(callback_uri, ax_attrs, oauth_scope)
        self.adapter_redirect(url_add_query(self._OPENID_ENDPOINT, args))

    def get_authenticated_user(self, callback):
        """
        Fetches the authenticated user data upon redirect.

        This method should be called by the handler that handles the callback
        URL to which the service redirects when the authenticate_redirect()
        or authorize_redirect() methods are called.

        :param callback:
            A function that is called after the authentication attempt. It is
            called passing a dictionary with the requested user attributes or
            None if the authentication failed.
        """
        request_arguments = self.adapter_request_params
        http = self.adapter_http_client

        # Verify the OpenID response via direct request to the OP
        args = map_dict(lambda k, v: (k, v[-1]), request_arguments)
        args["openid.mode"] = OPENID_MODE_CHECK_AUTHENTICATION
        url = self._OPENID_ENDPOINT
        response = http.fetch(RequestAdapter(
            HTTP_POST, url, urlencode(args), {
                HEADER_CONTENT_TYPE: CONTENT_TYPE_FORM_URLENCODED,
            }
        ))
        self._on_authentication_verified(callback, response)

    def _openid_args(self, callback_uri, ax_attrs=None, oauth_scope=None):
        """
        Builds and returns the OpenID arguments used in the authentication
        request.

        :param callback_uri:
            The URL to redirect to after authentication.
        :param ax_attrs:
            List of Attribute Exchange attributes to be fetched.
        :param oauth_scope:
            OAuth scope.
        :returns:
            A dictionary of arguments for the authentication URL.
        """
        ax_attrs = ax_attrs or ()
        url = urljoin(self.adapter_request_full_url, callback_uri)
        request_host = self.adapter_request_host
        #request_protocol = self.adapter_request_scheme
        args = {
            "openid.ns": self.SPEC_OPENID_NS,
            "openid.claimed_id": self.SPEC_IDENTIFIER_SELECT,
            "openid.identity": self.SPEC_IDENTIFIER_SELECT,
            "openid.return_to": url,
            #"openid.realm": request_protocol + "://" + request_host + "/",
            # See:
            # https://github.com/facebook/tornado/commit/1882670c5f9dd9be5840e1fac91e3ef98ba1deeb
            "openid.realm": urljoin(url, "/"),
            "openid.mode": OPENID_MODE_CHECKID_SETUP,
        }
        if ax_attrs:
            args.update({
                "openid.ns.ax": self.SPEC_AX_NS,
                "openid.ax.mode": OPENID_AX_MODE_FETCH_REQUEST,
            })
            ax_attrs = set(ax_attrs)
            required = []
            # "name" expands to the three individual name attributes.
            if "name" in ax_attrs:
                ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
                required += ["firstname", "fullname", "lastname"]
                args.update({
                    "openid.ax.type.firstname": self.ATTRIB_FIRST_NAME,
                    "openid.ax.type.fullname": self.ATTRIB_FULL_NAME,
                    "openid.ax.type.lastname": self.ATTRIB_LAST_NAME,
                })
            known_attrs = {
                "email": self.ATTRIB_EMAIL,
                "country": self.ATTRIB_COUNTRY,
                "language": self.ATTRIB_LANGUAGE,
                "username": self.ATTRIB_USERNAME,
            }
            for name in ax_attrs:
                args["openid.ax.type." + name] = known_attrs[name]
                required.append(name)
            args["openid.ax.required"] = ",".join(required)
        if oauth_scope:
            args.update({
                "openid.ns.oauth": self.SPEC_OAUTH_NS,
                "openid.oauth.consumer": request_host.split(":")[0],
                "openid.oauth.scope": oauth_scope,
            })
        return args

    def _on_authentication_verified(self, callback, response):
        """
        Called after the authentication attempt. It calls the callback function
        set when the authentication process started, passing a dictionary of
        user data if the authentication was successful or None if it failed.

        :param callback:
            A function that is called after the authentication attempt
        """
        if not response:
            logging.warning("Missing OpenID response.")
            callback(None)
            return
        # BUG FIX: the OpenID 2.0 check_authentication response contains
        # "is_valid:true"; this used to (incorrectly) look for
        # "is_value:true" and therefore rejected every valid response.
        elif response.error or "is_valid:true" not in response.body:
            logging.warning("Invalid OpenID response (%s): %r",
                            str(response.status) + response.reason,
                            response.body)
            callback(None)
            return

        request_arguments = self.adapter_request_params

        # Make sure we got back at least an email from Attribute Exchange.
        ax_ns = None
        for name, values in request_arguments.items():
            # BUG FIX: SPEC_AX_NS is a class attribute; the previous
            # unqualified reference raised NameError at runtime.
            if name.startswith("openid.ns.") and values[-1] == self.SPEC_AX_NS:
                ax_ns = name[10:]
                break
        ax_args = self._get_ax_args(request_arguments, ax_ns)
        def get_ax_arg(uri, ax_args=ax_args, ax_ns=ax_ns):
            ax_name = self._get_ax_name(ax_args, uri, ax_ns)
            return self.adapter_request_get(ax_name, "")

        claimed_id = self.adapter_request_get("openid.claimed_id", "")
        name = get_ax_arg(self.ATTRIB_FULL_NAME)
        first_name = get_ax_arg(self.ATTRIB_FIRST_NAME)
        last_name = get_ax_arg(self.ATTRIB_LAST_NAME)
        username = get_ax_arg(self.ATTRIB_USERNAME)
        email = get_ax_arg(self.ATTRIB_EMAIL)
        locale = get_ax_arg(self.ATTRIB_LANGUAGE).lower()
        country = get_ax_arg(self.ATTRIB_COUNTRY)
        user = self._get_user_dict(name, first_name, last_name, username,
                                   email, locale, country, claimed_id)
        callback(user)

    @classmethod
    def _get_user_dict(cls, name, first_name, last_name, username, email,
                       locale, country, claimed_id):
        """Assemble the user dict, skipping empty attributes and deriving
        a display name from name parts or the email local part."""
        user = dict()
        name_parts = []
        # First name and last name.
        if first_name:
            user["first_name"] = first_name
            name_parts.append(first_name)
        if last_name:
            user["last_name"] = last_name
            name_parts.append(last_name)
        # Full name.
        if name:
            user["name"] = name
        elif name_parts:
            user["name"] = u" ".join(name_parts)
        elif email:
            user["name"] = email.split("@")[0]
        # Other properties.
        if email:
            user["email"] = email
        if locale:
            user["locale"] = locale
        if username:
            user["username"] = username
        if country:
            user["country"] = country
        if claimed_id:
            user["claimed_id"] = claimed_id
        return user

    @classmethod
    def _get_ax_args(cls, request_arguments, ax_ns):
        """Return only the 'openid.<ax_ns>.type.*' request arguments."""
        if not ax_ns:
            return {}
        prefix = "openid." + ax_ns + ".type."
        return select_dict(lambda k, v: k.startswith(prefix), request_arguments)

    @classmethod
    def _get_ax_name(cls, ax_args, uri, ax_ns):
        """
        Returns an Attribute Exchange value name from the request.

        :param ax_args:
            Attribute Exchange-specific request arguments.
        :param uri:
            Attribute Exchange URI.
        :param ax_ns:
            Attribute Exchange namespace.
        :returns:
            The Attribute Exchange value name, if found in the request,
            otherwise "".
        """
        if not ax_ns:
            return ""
        ax_name = ""
        prefix = "openid." + ax_ns + ".type."
        for name, values in ax_args.items():
            if values[-1] == uri:
                part = name[len(prefix):]
                ax_name = "openid." + ax_ns + ".value." + part
                break
        return ax_name
class OAuthMixin(object):
    """
    Framework-agnostic OAuth 1.0 handler mixin implementation.
    """
    @property
    def oauth_client(self):
        raise NotImplementedError(
            "This property must be overridden by a derivative "
            "mixin to return an OAuth client instance."
        )

    def authorize_redirect(self, callback_uri=OAUTH_VALUE_CALLBACK_OOB,
                           realm=None,
                           *args, **kwargs):
        """
        Redirects the resource owner to obtain OAuth authorization for this
        service.

        You should call this method to log the user in, and then call
        :func:`get_authenticated_user` in the handler you registered
        as your callback URL to complete the authorization process.

        This method sets a cookie called
        ``_oauth_temporary_credentials`` which is subsequently used (and
        cleared) in :func:`get_authenticated_user` for security purposes.

        :param callback_uri:
            The callback URI path. For example, ``/auth/ready?format=json``
            The host on which this handler is running will be used as the
            base URI for this path.
        :param realm:
            The OAuth authorization realm.
        """
        self._auth_redirect(callback_uri, realm, False)

    def authenticate_redirect(self, callback_uri=OAUTH_VALUE_CALLBACK_OOB,
                              realm=None,
                              *args, **kwargs):
        """
        Just like authorize_redirect(), but auto-redirects if authorized.

        This is generally the right interface to use if you are using
        single sign-on.

        Override this method in subclasses if authentication URLs are not
        supported.
        """
        # Ask for temporary credentials, and when we get them, redirect
        # to authentication URL.
        self._auth_redirect(callback_uri, realm, True)

    def _auth_redirect(self, callback_uri, realm, authenticate):
        """
        Redirects the resource owner to obtain OAuth authorization for this
        service.

        Shared implementation of authorize_redirect()/authenticate_redirect().

        :param callback_uri:
            The callback URI path. For example, ``/auth/ready?format=json``
            The host on which this handler is running will be used as the
            base URI for this path.
        :param realm:
            The OAuth authorization realm.
        :param authenticate:
            Internal parameter. Not meant for use in client code.
            When set to ``True``, the resource owner will be redirected
            to an "authentication" URL instead of an "authorization" URL.
            Authentication URLs automatically redirect back to the application
            if the application is already authorized.
        """
        callback_uri = callback_uri or OAUTH_VALUE_CALLBACK_OOB
        # "oob" (out-of-band) is passed through as-is; any other path is
        # resolved against the current request URL.
        if callback_uri and callback_uri != OAUTH_VALUE_CALLBACK_OOB:
            callback_uri = urljoin(self.adapter_request_full_url, callback_uri)
        # Ask for temporary credentials, and when we get them, redirect
        # to either the authentication or authorization URL.
        #async_callback = partial(self._on_temporary_credentials,
        #                         authenticate=authenticate)
        temp, _ = self.oauth_client.fetch_temporary_credentials(
            realm=realm,
            oauth_callback=callback_uri
            # async_callback=async_callback
        )
        self._on_temporary_credentials(authenticate, temp)

    def _on_temporary_credentials(self, authenticate, credentials):
        # Obtain the temporary credentials from the response
        # and save them temporarily in a session cookie.
        self.adapter_set_credentials_cookie(credentials)
        if authenticate:
            # Redirects to the authentication URL.
            url = self.oauth_client.get_authentication_url(credentials)
        else:
            # Redirects to the authorization URL.
            url = self.oauth_client.get_authorization_url(credentials)
        self.adapter_redirect(url)

    def get_authenticated_user(self, callback, realm=None):
        """
        Gets the OAuth authorized user and access token on callback.

        This method should be called from the handler for your registered
        OAuth callback URL to complete the registration process. We call
        callback with the authenticated user, which in addition to standard
        attributes like 'name' includes the 'access_key' attribute, which
        contains the OAuth access you can use to make authorized requests
        to this service on behalf of the user.

        :param callback:
            The callback that will be called upon successful authorization
            with the user object as its first argument.
        :param realm:
            The realm for the authorization header.
        """
        oauth_token = self.adapter_request_get(OAUTH_PARAM_TOKEN)
        oauth_verifier = self.adapter_request_get(OAUTH_PARAM_VERIFIER)

        # Obtain the temporary credentials saved in the browser cookie.
        temp = self.adapter_read_credentials_cookie()

        # Verify that the oauth_token matches the one sent by the server
        # in the query string.
        self.oauth_client.check_verification_code(
            temp, oauth_token, oauth_verifier
        )

        # Ask for token credentials.
        token, _ = self.oauth_client.fetch_token_credentials(
            temp, oauth_verifier=oauth_verifier, realm=realm
        )
        # BUG FIX: this call was commented out, so `callback` was never
        # invoked and the token credentials were silently discarded,
        # contradicting this method's documented contract.
        self._oauth_get_user(token, callback)

    def _oauth_get_user(self, token_credentials, callback):
        # Subclasses fetch the service-specific user record and invoke
        # self._on_oauth_get_user(token_credentials, callback, user).
        raise NotImplementedError("OAuth mixin subclass authors must implement this.")

    def _on_oauth_get_user(self, token_credentials, callback, user):
        if not user:
            callback(None)
        else:
            user["oauth_token_credentials"] = token_credentials.to_dict()
            # For compatibility with tornado.
            user["access_token"] = token_credentials.to_dict()
            callback(user)
| {
"content_hash": "bff617e1ee099ce630326bc779ceba2a",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 97,
"avg_line_length": 39.882075471698116,
"alnum_prop": 0.6043761088113542,
"repo_name": "gorakhargosh/pyoauth",
"id": "e452808e4183bf0d6fd90606fc5bbfe778b7c6d2",
"size": "17673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyoauth/oauth1/client/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "306721"
},
{
"name": "Shell",
"bytes": "5036"
}
],
"symlink_target": ""
} |
'''OpenGL extension EXT.cull_vertex
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Registry name of the wrapped extension and its deprecation flag, used by
# the PyOpenGL extension machinery below.
EXTENSION_NAME = 'GL_EXT_cull_vertex'
_DEPRECATED = False
# Enumerant tokens defined by GL_EXT_cull_vertex.
GL_CULL_VERTEX_EXT = constant.Constant( 'GL_CULL_VERTEX_EXT', 0x81AA )
GL_CULL_VERTEX_EYE_POSITION_EXT = constant.Constant( 'GL_CULL_VERTEX_EYE_POSITION_EXT', 0x81AB )
GL_CULL_VERTEX_OBJECT_POSITION_EXT = constant.Constant( 'GL_CULL_VERTEX_OBJECT_POSITION_EXT', 0x81AC )
# Entry points exposed by the extension, resolved against the GL library.
glCullParameterdvEXT = platform.createExtensionFunction(
'glCullParameterdvEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLdoubleArray,),
doc='glCullParameterdvEXT(GLenum(pname), GLdoubleArray(params)) -> None',
argNames=('pname','params',),
deprecated=_DEPRECATED,
)
glCullParameterfvEXT = platform.createExtensionFunction(
'glCullParameterfvEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLfloatArray,),
doc='glCullParameterfvEXT(GLenum(pname), GLfloatArray(params)) -> None',
argNames=('pname','params',),
deprecated=_DEPRECATED,
)
def glInitCullVertexEXT():
    '''Return boolean indicating whether this extension is available'''
    return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "23f67b157b7ae6a67c24200837803305",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 102,
"avg_line_length": 37.027027027027025,
"alnum_prop": 0.7846715328467153,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "0d18503f724997348429c6215d3bf45225cadd5d",
"size": "1370",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/EXT/cull_vertex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import sys
import os
import argparse
import numpy as np
import pickle
import chainer
from chainer import serializers
import chainer.links as L
import chainer.functions as F
from chainer import cuda
from Model import CNNModel
from parameters import Parameters
from read_file import get_data
if __name__ == u'__main__':
    # Command-line interface: trained model snapshot, pickled training
    # parameters and the test-data file.  Defaults are declared directly on
    # the parser instead of being patched in afterwards.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', default='cnn_model.dump', type=str)
    parser.add_argument('--params', '-p', default='params.dump', type=str)
    parser.add_argument('--testdata', '-t', default=None, type=str)
    args = parser.parse_args()
    # Load the evaluation set (labels + raw samples).
    label_data, test_data = get_data(args.testdata)
    # Restore the hyper parameters saved at training time.
    with open(args.params, 'rb') as f:
        params = pickle.load(f)
    # Rebuild the network with matching dimensions and load trained weights.
    cnn_model = CNNModel(len(test_data[0]), len(test_data[0][0]),
                         params.filter_size, max(label_data) + 1)
    chainer.serializers.load_npz(args.model, cnn_model)
    # Evaluate sample by sample, reporting every misclassification.
    total = 0
    mistakes = 0
    for sample, label in zip(test_data, label_data):
        total += 1
        x = chainer.Variable(np.asarray([[sample]], dtype=np.float32))
        probabilities = F.softmax(cnn_model(x).data).data
        predicted = np.argmax(probabilities)
        if label != predicted:
            print('{} : {}'.format(label, predicted))
            mistakes += 1
    print('correct_num : {}/{}, correct_rate = {}'.format(total - mistakes, total, float(total - mistakes)/total))
| {
"content_hash": "cbfa27d7f45f1a45f846302793e0e731",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 27.927272727272726,
"alnum_prop": 0.61328125,
"repo_name": "takat0m0/test_code",
"id": "95ce742ea6ca4082d9c70c3555cd877d13957265",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnn_example/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38944"
}
],
"symlink_target": ""
} |
class ShellTemplate(object):
    """Plain value object describing a shell template.

    ``standard_version`` and ``params`` fall back to fresh empty dicts when
    omitted (or otherwise falsy), so instances never share mutable state.
    """

    def __init__(
        self,
        name,
        description,
        repository,
        min_cs_ver,
        standard=None,
        standard_version=None,
        params=None,
    ):
        self.name = name
        self.description = description
        self.repository = repository
        self.min_cs_ver = min_cs_ver
        self.standard = standard
        # Equivalent to ``x or {}``: falsy values become a fresh dict.
        self.standard_version = standard_version if standard_version else {}
        self.params = params if params else {}
| {
"content_hash": "7767e0f0d8183afe53aceaa29e34c859",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 26.61111111111111,
"alnum_prop": 0.5532359081419624,
"repo_name": "QualiSystems/shellfoundry",
"id": "8c431977f44a9fd0d5e3cb6a52b7e1b5b17092bb",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shellfoundry/models/shell_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "375311"
},
{
"name": "Rich Text Format",
"bytes": "692763"
}
],
"symlink_target": ""
} |
from labels import LabelsPlugin
from electrum.plugins import hook
class Plugin(LabelsPlugin):
    # Kivy GUI glue for the label-sync plugin: remembers the wallet window
    # so that label pulls can refresh the history view.
    @hook
    def load_wallet(self, wallet, window):
        # Keep a reference to the window for later UI refreshes.
        self.window = window
        self.start_wallet(wallet)
    def on_pulled(self, wallet):
        self.print_error('on pulled')
        # Ask the Kivy window to rebuild its history tab with the new labels.
        self.window._trigger_update_history()
| {
"content_hash": "6aa4f5c46cdb8db20d746498f08bd3d4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 23.642857142857142,
"alnum_prop": 0.6737160120845922,
"repo_name": "fireduck64/electrum",
"id": "1a4b31f1738cd8decc9061e9050ddaf9c3c0d91a",
"size": "331",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plugins/labels/kivy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "NSIS",
"bytes": "7125"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1241321"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
"""
PHYLIP multiple sequence alignment format (:mod:`skbio.io.phylip`)
==================================================================
.. currentmodule:: skbio.io.phylip
The PHYLIP file format stores a multiple sequence alignment. The format was
originally defined and used in Joe Felsenstein's PHYLIP package [1]_, and has
since been supported by several other bioinformatics tools (e.g., RAxML [2]_).
See [3]_ for the original format description, and [4]_ and [5]_ for additional
descriptions.
An example PHYLIP-formatted file taken from [3]_::
5 42
Turkey AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
Salmo gairAAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
H. SapiensACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
Chimp AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
Gorilla AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
.. note:: Original copyright notice for the above PHYLIP file:
*(c) Copyright 1986-2008 by The University of Washington. Written by Joseph
Felsenstein. Permission is granted to copy this document provided that no
fee is charged for it and that this copyright notice is not removed.*
Format Support
--------------
**Has Sniffer: No**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|No |Yes |:mod:`skbio.alignment.Alignment` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
PHYLIP format is a plain text format containing exactly two sections: a header
describing the dimensions of the alignment, followed by the multiple sequence
alignment itself.
The format described here is "strict" PHYLIP, as described in [4]_. Strict
PHYLIP requires that each sequence identifier is exactly 10 characters long
(padded with spaces as necessary). Other bioinformatics tools (e.g., RAxML) may
relax this rule to allow for longer sequence identifiers. See the
**Alignment Section** below for more details.
The format described here is "sequential" format. The original PHYLIP format
specification [3]_ describes both sequential and interleaved formats.
.. note:: scikit-bio currently only supports writing strict, sequential
PHYLIP-formatted files from an ``skbio.alignment.Alignment``. It does not
yet support reading PHYLIP-formatted files, nor does it support relaxed or
interleaved PHYLIP formats.
Header Section
^^^^^^^^^^^^^^
The header consists of a single line describing the dimensions of the
alignment. It **must** be the first line in the file. The header consists of
optional spaces, followed by two positive integers (``n`` and ``m``) separated
by one or more spaces. The first integer (``n``) specifies the number of
sequences (i.e., the number of rows) in the alignment. The second integer
(``m``) specifies the length of the sequences (i.e., the number of columns) in
the alignment. The smallest supported alignment dimensions are 1x1.
.. note:: scikit-bio will write the PHYLIP format header *without* preceding
spaces, and with only a single space between ``n`` and ``m``.
PHYLIP format *does not* support blank line(s) between the header and the
alignment.
Alignment Section
^^^^^^^^^^^^^^^^^
The alignment section immediately follows the header. It consists of ``n``
lines (rows), one for each sequence in the alignment. Each row consists of a
sequence identifier (ID) and characters in the sequence, in fixed width format.
The sequence ID can be up to 10 characters long. IDs less than 10 characters
must have spaces appended to them to reach the 10 character fixed width. Within
an ID, all characters except newlines are supported, including spaces,
underscores, and numbers.
.. note:: While not explicitly stated in the original PHYLIP format
description, scikit-bio only supports writing unique sequence identifiers
(i.e., duplicates are not allowed). Uniqueness is required because an
``skbio.alignment.Alignment`` cannot be created with duplicate IDs.
scikit-bio supports the empty string (``''``) as a valid sequence ID. An
empty ID will be padded with 10 spaces.
Sequence characters immediately follow the sequence ID. They *must* start at
the 11th character in the line, as the first 10 characters are reserved for the
sequence ID. While PHYLIP format does not explicitly restrict the set of
supported characters that may be used to represent a sequence, the original
format description [3]_ specifies the IUPAC nucleic acid lexicon for DNA or RNA
sequences, and the IUPAC protein lexicon for protein sequences. The original
PHYLIP specification uses ``-`` as a gap character, though older versions also
supported ``.``. The sequence characters may contain optional spaces (e.g., to
improve readability), and both upper and lower case characters are supported.
.. note:: scikit-bio will write a PHYLIP-formatted file even if the alignment's
sequence characters are not valid IUPAC characters. This differs from the
PHYLIP specification, which states that a PHYLIP-formatted file can only
contain valid IUPAC characters. To check whether all characters are valid
before writing, the user can call ``Alignment.is_valid()``.
Since scikit-bio supports both ``-`` and ``.`` as gap characters (e.g., in
``skbio.alignment.Alignment``), both are supported when writing a
PHYLIP-formatted file.
When writing a PHYLIP-formatted file, scikit-bio will split up each sequence
into chunks that are 10 characters long. Each chunk will be separated by a
single space. The sequence will always appear on a single line (sequential
format). It will *not* be wrapped across multiple lines. Sequences are
chunked in this manner for improved readability, and because most example
PHYLIP files are chunked in a similar way (e.g., see the example file
above). Note that this chunking is not required by the PHYLIP format.
Examples
--------
Let's create an alignment with three DNA sequences of equal length:
>>> from skbio import Alignment, DNA
>>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id':'seq1'}),
... DNA('A--GTCGAA-GTACCT', metadata={'id':'sequence-2'}),
... DNA('AGAGTTGAAGGTATCT', metadata={'id':'3'})]
>>> aln = Alignment(seqs)
>>> aln
<Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
Now let's write the alignment to file in PHYLIP format, and take a look at the
output:
>>> from StringIO import StringIO
>>> fh = StringIO()
>>> aln.write(fh, format='phylip')
>>> print(fh.getvalue())
3 16
seq1 ACCGTTGTA- GTAGCT
sequence-2A--GTCGAA- GTACCT
3 AGAGTTGAAG GTATCT
<BLANKLINE>
>>> fh.close()
Notice that the 16-character sequences were split into two chunks, and that
each sequence appears on a single line (sequential format). Also note that each
sequence ID is padded with spaces to 10 characters in order to produce a fixed
width column.
If the sequence IDs in an alignment surpass the 10-character limit, an error
will be raised when we try to write a PHYLIP file:
>>> long_id_seqs = [DNA('ACCGT', metadata={'id':'seq1'}),
... DNA('A--GT', metadata={'id':'long-sequence-2'}),
... DNA('AGAGT', metadata={'id':'seq3'})]
>>> long_id_aln = Alignment(long_id_seqs)
>>> fh = StringIO()
>>> long_id_aln.write(fh, format='phylip')
Traceback (most recent call last):
...
PhylipFormatError: Alignment can only be written in PHYLIP format if all \
sequence IDs have 10 or fewer characters. Found sequence with ID \
'long-sequence-2' that exceeds this limit. Use Alignment.update_ids to assign \
shorter IDs.
>>> fh.close()
One way to work around this is to update the IDs to be shorter. The recommended
way of accomplishing this is via ``Alignment.update_ids``, which provides a
flexible way of creating a new ``Alignment`` with updated IDs. For example, to
remap each of the IDs to integer-based IDs:
>>> short_id_aln, _ = long_id_aln.update_ids()
>>> short_id_aln.ids()
['1', '2', '3']
We can now write the new alignment in PHYLIP format:
>>> fh = StringIO()
>>> short_id_aln.write(fh, format='phylip')
>>> print(fh.getvalue())
3 5
1 ACCGT
2 A--GT
3 AGAGT
<BLANKLINE>
>>> fh.close()
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip.html
.. [2] "RAxML Version 8: A tool for Phylogenetic Analysis and
   Post-Analysis of Large Phylogenies". In Bioinformatics, 2014
.. [3] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
.. [4] http://www.phylo.org/tools/obsolete/phylip.html
.. [5] http://www.bioperl.org/wiki/PHYLIP_multiple_alignment_format
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.alignment import Alignment
from skbio.io import register_writer, PhylipFormatError
from skbio.util._misc import chunk_str
@register_writer('phylip', Alignment)
def _alignment_to_phylip(obj, fh):
    """Serialize ``obj`` to ``fh`` as strict, sequential PHYLIP.

    Raises ``PhylipFormatError`` for an empty alignment, a zero-length
    alignment, or any sequence ID longer than the 10-character limit.
    """
    id_width = 10  # strict PHYLIP: IDs occupy a fixed 10-character column

    if obj.is_empty():
        raise PhylipFormatError(
            "Alignment can only be written in PHYLIP format if there is at "
            "least one sequence in the alignment.")

    n_positions = obj.sequence_length()
    if n_positions == 0:
        raise PhylipFormatError(
            "Alignment can only be written in PHYLIP format if there is at "
            "least one position in the alignment.")

    for seq_id in obj.ids():
        if len(seq_id) > id_width:
            raise PhylipFormatError(
                "Alignment can only be written in PHYLIP format if all "
                "sequence IDs have %d or fewer characters. Found sequence "
                "with ID '%s' that exceeds this limit. Use "
                "Alignment.update_ids to assign shorter IDs." %
                (id_width, seq_id))

    # Header line: <sequence count> <sequence length>.
    fh.write('{0:d} {1:d}\n'.format(obj.sequence_count(), n_positions))

    # One line per sequence: space-padded ID, then the sequence split into
    # 10-character chunks separated by single spaces (for readability).
    for seq in obj:
        padded_id = seq.metadata['id'].ljust(id_width)
        fh.write(padded_id + chunk_str(str(seq), id_width, ' ') + '\n')
| {
"content_hash": "12aa1c1efaa789a4c8e998b8da9ae745",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 42.857723577235774,
"alnum_prop": 0.6813051313667836,
"repo_name": "Achuth17/scikit-bio",
"id": "886359c6e71ccbe02a9198870903daa6ae68f70c",
"size": "10543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/io/phylip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "585"
},
{
"name": "Python",
"bytes": "1852175"
}
],
"symlink_target": ""
} |
from baidupush import BaiduPush, BaiduPushError
__all__ = ( 'BaiduPush', 'BaiduPushError', )
| {
"content_hash": "b5132b56f9ed09b977049337186f8b40",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.723404255319149,
"repo_name": "quatanium/python-baidu-push-server",
"id": "3aba2a1364b30260936ce7bf1a905bf58f16569b",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baidupush/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17716"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import unittest
import vk
import utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# copy to test_props.py and fill it
USER_LOGIN = '' # user email or phone number
USER_PASSWORD = '' # user password
APP_ID = '' # aka API/Client ID
from test_props import USER_LOGIN, USER_PASSWORD, APP_ID
class UtilsTestCase(unittest.TestCase):
    """Unit tests for ``utils.stringify_values`` with byte/unicode mixes."""
    def test_stringify(self):
        # ASCII list values are joined into a single comma-separated string.
        self.assertEqual({1: 'str,str2'}, utils.stringify_values({1: ['str', 'str2']}))
    def test_stringify_2(self):
        # Mixed byte/unicode input yields a unicode result.
        self.assertEqual({1: u'str,стр2'}, utils.stringify_values({1: ['str', u'стр2']}))
    def test_stringify_3(self):
        # Pure unicode input is joined unchanged.
        self.assertEqual({1: u'стр,стр2'}, utils.stringify_values({1: [u'стр', u'стр2']}))
class VkTestCase(unittest.TestCase):
    """Integration tests against the live VK API.

    Requires real credentials in ``test_props`` (USER_LOGIN, USER_PASSWORD,
    APP_ID); network access is needed for every test.
    """
    def setUp(self):
        # Authenticate and build an API proxy once per test.
        auth_session = vk.AuthSession(app_id=APP_ID, user_login=USER_LOGIN, user_password=USER_PASSWORD)
        access_token, _ = auth_session.get_access_token()
        session = vk.Session(access_token=access_token)
        self.vk_api = vk.API(session, lang='ru')
    def test_get_server_time(self):
        # Server time should fall within a ~10 second window around "now".
        time_1 = time.time() - 1
        time_2 = time_1 + 10
        server_time = self.vk_api.getServerTime()
        self.assertTrue(time_1 <= server_time <= time_2)
    def test_get_server_time_via_token_api(self):
        # Same check issued through the token-authenticated API object.
        time_1 = time.time() - 1
        time_2 = time_1 + 10
        server_time = self.vk_api.getServerTime()
        self.assertTrue(time_1 <= server_time <= time_2)
    def test_get_profiles_via_token(self):
        # User id 1 is a stable, well-known VK profile.
        profiles = self.vk_api.users.get(user_id=1)
        self.assertEqual(profiles[0]['last_name'], u'Дуров')
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "5400f485ebf5ce551965d22916f47dfc",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 104,
"avg_line_length": 29.982758620689655,
"alnum_prop": 0.6233467510063255,
"repo_name": "chibisov/vk",
"id": "8b4a2ab050c3e4224ddd10da6eb3779a9103b139",
"size": "1777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vk/tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21445"
}
],
"symlink_target": ""
} |
'''Utility functions to search, filter, and manipulate output data of Chrome's event tracer'''
import tkinter
from tkinter.filedialog import askopenfilename
import json
import csv
import os;
def findFile():
    """Open a native file-selection dialog and return the chosen path (GUI I/O)."""
    #Gets rid of a GUI element native to tkinter
    root = tkinter.Tk();
    root.withdraw();
    return askopenfilename()
def splitList(events, attr):
    """Group ``events`` into lists keyed by each event's ``attr`` value.

    Raises ``KeyError`` if an event lacks ``attr`` (same as the original
    dict-indexing behaviour).
    """
    categorized_events = {}
    for event in events:
        # setdefault creates the bucket on first sight of this key.
        categorized_events.setdefault(event[attr], []).append(event)
    return categorized_events
def filterEvents(events, attr, val):
    """Return only the events whose ``attr`` value equals ``val``."""
    return list(filter(lambda event: event[attr] == val, events))
def extract(events, attr):
    """Collect the ``attr`` value from every event that defines it;
    events missing the attribute are silently skipped."""
    values = (item[attr] for item in events if attr in item)
    return list(values)
def toTxt(data, name, subdir=""):
    """Write each element of ``data`` (via ``str``) on its own line to
    ``<name>.txt``, optionally inside ``subdir`` (created on demand) under
    the current working directory.

    Fix: the original built paths with hard-coded ``"\\"`` separators,
    which produced broken file names on POSIX systems; ``os.path.join``
    uses the platform separator, so behaviour on Windows is unchanged.
    """
    directory = ""
    if subdir:
        directory = os.path.join(os.getcwd(), subdir)
        if not os.path.exists(directory):
            os.makedirs(directory)
    # os.path.join("", "x.txt") == "x.txt", so the no-subdir case still
    # writes into the current working directory.
    with open(os.path.join(directory, name + ".txt"), "w") as out:
        for element in data:
            out.write(str(element) + "\n")
def runtime(events, attr="ts"):
    """Total runtime of a list of trace events: last timestamp minus first.

    Assumes the events are already in chronological order, as produced by
    Chrome's profiler.
    """
    first, last = events[0], events[-1]
    return last[attr] - first[attr]
def readCSVFuncData(funcdata_csv):
    """Read an open CSV file object into a list of row dicts (the header
    row supplies the keys)."""
    return list(csv.DictReader(funcdata_csv))
def onAllProfilesInDir(dir, func):
    """List the entries of ``dir``.

    Fix: the original printed the misspelled name ``sub_directiories``,
    raising ``NameError`` on every call.

    NOTE(review): ``func`` is accepted but never used -- this function
    looks unfinished; presumably it should apply ``func`` to each profile
    found. Left as-is apart from the variable-name fix.
    """
    sub_directories = os.listdir(dir)
    print(sub_directories)
"content_hash": "90dd710d7524adb10e0ddb6b61b9e5a1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 94,
"avg_line_length": 30.630769230769232,
"alnum_prop": 0.6052235057759919,
"repo_name": "benjaminy/Charcoal",
"id": "355ea47dedb98060c50c3399268c6d023a3fbca7",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RealAppAnalysis/test dump/browsers/datautil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "168860"
},
{
"name": "C++",
"bytes": "342986"
},
{
"name": "CSS",
"bytes": "2466"
},
{
"name": "Go",
"bytes": "1373"
},
{
"name": "HTML",
"bytes": "5520"
},
{
"name": "Haskell",
"bytes": "2836"
},
{
"name": "Java",
"bytes": "3893"
},
{
"name": "JavaScript",
"bytes": "47815"
},
{
"name": "Makefile",
"bytes": "15619"
},
{
"name": "Python",
"bytes": "195186"
},
{
"name": "Shell",
"bytes": "8963"
},
{
"name": "Standard ML",
"bytes": "1765"
},
{
"name": "TeX",
"bytes": "1114333"
}
],
"symlink_target": ""
} |
class Dispenser(object):
    """Factory/registry for ``Placebo`` mocks.

    Attribute access lazily creates (and caches) a named ``Placebo``;
    calling the dispenser builds an unregistered one. Usable as a context
    manager that verifies every dispensed placebo on exit.
    """
    def __init__(self):
        self.placebos = {}
    def __call__(self, name=None):
        # Unregistered placebo: not tracked, so never auto-verified.
        return Placebo(name)
    def __getattr__(self, name):
        return self.placebos.setdefault(name, Placebo(name))
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        self.verify()
    def verify(self):
        # Fix: ``values()`` instead of the Python-2-only ``itervalues()``;
        # identical iteration behaviour on Python 2 (builds a small list)
        # and valid on Python 3.
        for m in self.placebos.values():
            m.verify()
class Placebo(object):
    """A recording mock: ``receives``/``attr`` register expectations that
    are later checked by ``verify``."""
    def __init__(self, name='Mock'):
        self.name = name
        self.expectations = []
        self.gets = {}
    def receives(self, method_name):
        """Register an expected method call and expose it as a callable
        attribute of the mock."""
        exp = Expectation(self, method_name)
        setattr(self, method_name, exp)
        self.expectations.append(exp)
        return exp
    def attr(self, name):
        """Register an expected attribute read (resolved via __getattr__)."""
        exp = Expectation(self, name)
        self.gets[name] = exp
        return exp
    def __getattr__(self, name):
        if name in self.gets:
            return self.gets[name]() # need to call attributes ourself
        # Fix: the original used the Python-2-only ``raise E, msg``
        # statement; the call form below is valid on Python 2 and 3.
        raise AttributeError("%r not found in %r" % (name, self))
    def verify(self):
        for each in self.expectations:
            each.verify()
    def __repr__(self):
        return "<Mock '%s'>" % self.name
    def __str__(self):
        return repr(self)
class Expectation(object):
    """One expected interaction with a mock: optional argument matching
    (identity-based), call counting, a canned return value and/or a
    behavior callable."""
    def __init__(self, mock, name):
        self.mock = mock
        self.method_name = name
        self.times_called = 0
        self.return_value = None
        self.behavior = None
        self.expected_times_called = 0
    # Always return the return_value if set.
    def __call__(self, *args, **kwargs):
        self.__verify_args(*args, **kwargs)
        self.times_called += 1
        result = None
        if self.behavior:
            result = self.behavior(self, *args, **kwargs)
        # Fix: the original tested ``if not self.return_value`` which made
        # falsy canned values (0, '', False, []) silently ignored, against
        # the documented "always return the return_value if set" contract.
        # Note: returns(None) remains indistinguishable from "not set".
        if self.return_value is None:
            return result
        return self.return_value
    def with_args(self, *args, **kwargs):
        """Record the exact (identity-compared) arguments expected."""
        self.args = args
        self.kwargs = kwargs
        return self
    def called(self, num):
        self.expected_times_called = num
        return self
    def once(self):
        return self.called(1)
    def twice(self):
        return self.called(2)
    def returns(self, ret):
        self.return_value = ret
        return self
    and_returns = returns
    def does(self, func):
        """Install a callable invoked (with this expectation prepended) on
        each call; its result is returned unless a return value is set."""
        self.behavior = func
        return self
    and_does = does
    def verify(self):
        if self.expected_times_called > 0:
            assert self.times_called == self.expected_times_called, \
                "%s expected to be called %s times, but was called %s times" % \
                (repr(self), self.expected_times_called, self.times_called)
    def __verify_args(self, *args, **kwargs):
        # Only enforced after with_args() has recorded expectations.
        if hasattr(self, 'args'):
            self.__verify_positional_args(args)
            self.__verify_keywords_args(kwargs)
    def __verify_positional_args(self, args):
        assert len(self.args) == len(args), \
            "%s expected %s positional arguments, but received %s\n\t%s" % \
            (repr(self), len(self.args), len(args), repr(args))
        for (i, (expected, received)) in enumerate(zip(self.args, args)):
            # Identity comparison (is), not equality -- deliberate.
            assert expected is received, \
                "%s at position %d: expected: %s received: %s" % \
                (repr(self), i, repr(expected), repr(received))
    def __verify_keywords_args(self, kwargs):
        assert len(self.kwargs) == len(kwargs), \
            "%s expected %s positional arguments, but received %s" % \
            (repr(self), len(self.kwargs), len(kwargs))
        for k in self.kwargs:
            assert k in kwargs, "%s missing keyword %s" % (repr(self), k)
            assert self.kwargs[k] is kwargs[k], \
                "%s keyword %s: expected: %s received: %s" % \
                (repr(self), k, repr(self.kwargs[k]), repr(kwargs[k]))
    def __repr__(self):
        return "<%s '%s.%s'>" % (self.__class__.__name__, self.mock.name, self.method_name)
| {
"content_hash": "749b88264641e78f3801b8975530d576",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 91,
"avg_line_length": 32.14728682170543,
"alnum_prop": 0.53315649867374,
"repo_name": "wilkes/expectorant",
"id": "39dd246e5cc7834c6de2fbf152673c0b7af1f946",
"size": "4147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/expectorant/placebos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11677"
}
],
"symlink_target": ""
} |
"""\
Tests against <https://github.com/mnooner256/pyqrcode/issues/50>
"""
from __future__ import unicode_literals
from nose.tools import eq_
from segno_mimos import pyqrcode
class FakeString(str):
    """
    A ``str`` subclass that behaves normally except that encoding its value
    as ``shiftjis`` raises ``LookupError`` -- mimicking a Python install
    where that codec is unavailable, as far as the QRCode constructor needs.
    """
    def __new__(cls, *more):
        return str.__new__(cls, *more)

    def encode(self, encoding=None, errors='strict'):
        # Delegate every codec except the one we pretend is missing.
        if encoding != 'shiftjis':
            return super(FakeString, self).encode(encoding, errors)
        raise LookupError("unknown encoding: shiftjis")
def test_constructing_without_shiftjis_encoding_available():
    """QR creation must fall back to binary mode when the Shift JIS (kanji)
    codec is unavailable (regression test for issue #50)."""
    content = FakeString("t123456789")
    code = pyqrcode.create(content, error="Q")
    eq_(code.mode, 'binary')
| {
"content_hash": "69d9791f9f7a4c6b58ed40da6db19cf2",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 32.6551724137931,
"alnum_prop": 0.6842661034846885,
"repo_name": "heuer/segno-mimos",
"id": "61af14236aad07ab97f00ff1014499dc5ea88376",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pyqrcode/test_issue50.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71189"
}
],
"symlink_target": ""
} |
import settings
from ast import literal_eval
STACK_TRACE_ITEM_POSITION_LINE_NUMBER = 1
STACK_TRACE_ITEM_POSITION_FUNCTION_CALL = 3
class ExecutedInstructionsGroup(object):
    """Everything recorded for one traced function invocation: the call
    and return stacktrace lines, the instructions executed in between, and
    caller/callee links filled in while the tree is assembled."""

    def __init__(self):
        # Tree links (populated by build_execution_tree).
        self.function_caller = None
        self.function_callees = []
        # Stacktrace lines opening and closing this invocation.
        self.function_call_line = self.return_call_line = None
        # Instructions recorded between the call and its return.
        self.intermediate_code_lines = []
class StackTraceItem(object):
    """One frame of a recorded stacktrace: the line number in the
    transformed source plus the textual function call."""

    def __init__(self):
        self.line_number_in_transformed_code = 0
        # TODO: replace with function name and arguments
        self.function_call = ""

    def __str__(self):
        return ("[StackTraceItem] line_number_in_transformed_code: "
                + str(self.line_number_in_transformed_code)
                + ", call: " + self.function_call)

    def __repr__(self):
        return self.__str__()
class ExecutedInstruction(object):
    '''Abstraction over single code instruction (for ex. assignment) or group of instructions (such as stacktrace)'''
    # TODO: replace with enum
    def __init__(self):
        self.data_type = None
        self.var_name = None
        self.data_value = None
        self.execution_order_number = 0
        self.stacktrace_items = []

    def __str__(self):
        # Each stacktrace frame is rendered on its own line after the header.
        frames = "".join("\n" + str(item) for item in self.stacktrace_items)
        return ("[DataLine] type: " + str(self.data_type)
                + ", var_name: " + str(self.var_name)
                + ", execution_order: " + str(self.execution_order_number)
                + frames)

    def __repr__(self):
        return self.__str__()
def build_execution_tree(generated_data_filename):
    """Parse the collected trace file and assemble the function-call tree.

    Each line of the file is parsed into an ExecutedInstruction; call/return
    stacktrace pairs are matched up into ExecutedInstructionsGroup objects,
    which are then linked caller-to-callee by containment of their
    call/return order spans.

    Returns the root ExecutedInstructionsGroup (the outermost call).
    """
    # Parse every recorded line, numbering them in execution order (1-based).
    with open(generated_data_filename, "r") as collected_data:
        parsed_data_lines = []
        execution_order_number = 1
        for line in collected_data:
            parsed_data_lines.append(parse_instruction(line, execution_order_number))
            execution_order_number += 1
    # Match each function-call stacktrace with its return stacktrace and
    # collect the instructions recorded in between.
    remain_function_calls = []
    remain_instructions_lines = []
    generated_instructions_groups = []
    for instruction_line in parsed_data_lines:
        if instruction_line.data_type == settings.META_MARK_FUNC_CALL_STACKTRACE:
            remain_function_calls.append(instruction_line)
            remain_instructions_lines.append(instruction_line)
        elif instruction_line.data_type == settings.META_MARK_RETURN_STACKTRACE:
            intructions_group = ExecutedInstructionsGroup()
            intructions_group.function_call_line = remain_function_calls.pop()
            intructions_group.return_call_line = instruction_line
            # Pop back to the matching call, collecting everything between.
            intermediate_instruction = remain_instructions_lines.pop()
            while intermediate_instruction != intructions_group.function_call_line:
                intructions_group.intermediate_code_lines.insert(0, intermediate_instruction)
                intermediate_instruction = remain_instructions_lines.pop()
            generated_instructions_groups.append(intructions_group)
        else:
            remain_instructions_lines.append(instruction_line)
    # Fix: the original tested ``remain_instructions_lines.count > 0``,
    # comparing the bound *method* with an int -- always true on Python 2,
    # so the warning printed unconditionally (and TypeError on Python 3).
    if remain_instructions_lines:
        print("Check unprocessed. The only correct case here is when var_change was done outside the function call.")
        print("For example: global functions.")
    # Sort by call order; ``key=`` is equivalent to the original
    # ``cmp=compare_instructions`` and also works on Python 3.
    generated_instructions_groups.sort(
        key=lambda group: group.function_call_line.execution_order_number)
    processed_instructions_groups = []
    for instructions_group in generated_instructions_groups:
        if len(processed_instructions_groups) == 0:
            processed_instructions_groups.append(instructions_group)
        else:
            # The caller is the closest already-processed group whose
            # call/return span strictly encloses this group's span.
            for processed_group in reversed(processed_instructions_groups):
                processed_ret_call_order = processed_group.return_call_line.execution_order_number
                processed_func_call_order = processed_group.function_call_line.execution_order_number
                current_func_call_order = instructions_group.function_call_line.execution_order_number
                current_ret_call_order = instructions_group.return_call_line.execution_order_number
                if ((processed_func_call_order < current_func_call_order)
                        and (processed_ret_call_order > current_ret_call_order)):
                    instructions_group.function_caller = processed_group
                    processed_group.function_callees.append(instructions_group)
                    processed_instructions_groups.append(instructions_group)
                    break
    return processed_instructions_groups[0]
def compare_instructions(instructions_group1, instructions_group2):
    """Three-way comparison of two instruction groups by call order.

    Fix: replaces the Python-2-only ``cmp`` builtin (removed in Python 3)
    with the equivalent ``(a > b) - (a < b)`` expression, which returns the
    same -1/0/1 values on both versions.
    """
    order1 = instructions_group1.function_call_line.execution_order_number
    order2 = instructions_group2.function_call_line.execution_order_number
    return (order1 > order2) - (order1 < order2)
def parse_instruction(line, execution_order_number):
    """Turn one raw trace line into an ExecutedInstruction.

    The line's leading meta-mark (from ``settings``) selects how the rest
    is parsed; ``execution_order_number`` is stored for later ordering.
    """
    data_line = ExecutedInstruction()
    if line.startswith(settings.META_MARK_VARCHANGE):
        filtered_data_string = line.replace(settings.META_MARK_VARCHANGE, "")
        splitter_position = filtered_data_string.find("=")
        var_name = filtered_data_string[:splitter_position]
        # +2 skips the '=' and one following character -- presumably a
        # separator emitted by the tracer; TODO confirm against the writer.
        filtered_data_string = filtered_data_string[splitter_position + 2:]
        data_line.data_type = settings.META_MARK_VARCHANGE
        data_line.var_name = var_name
        # literal_eval safely parses the recorded Python literal value.
        data_line.data_value = literal_eval(filtered_data_string)
    elif line.startswith(settings.META_MARK_FUNC_CALL_STACKTRACE):
        filtered_data_string = line.replace(settings.META_MARK_FUNC_CALL_STACKTRACE, "")
        data_line.data_type = settings.META_MARK_FUNC_CALL_STACKTRACE
        data_line.data_value = literal_eval(filtered_data_string)
        data_line.stacktrace_items = process_stacktrace_info(data_line.data_value)
    elif line.startswith(settings.META_MARK_RETURN_STACKTRACE):
        filtered_data_string = line.replace(settings.META_MARK_RETURN_STACKTRACE, "")
        data_line.data_type = settings.META_MARK_RETURN_STACKTRACE
        data_line.data_value = literal_eval(filtered_data_string)
        data_line.stacktrace_items = process_stacktrace_info(data_line.data_value)
    data_line.execution_order_number = execution_order_number
    return data_line
def process_stacktrace_info(stack_trace):
    """Convert raw stack-frame tuples into StackTraceItem objects, keeping
    only frames that originate from the transformed source file."""
    stack_trace_items = []
    for stack_trace_item in stack_trace:
        if stack_trace_item:
            # Element 0 of a frame tuple is the source filename.
            if stack_trace_item[0] == settings.TRANSFORMED_SOURCE_FILE:
                stack_trace_item_structure = StackTraceItem()
                position = 0
                for attribute in stack_trace_item:
                    if position == STACK_TRACE_ITEM_POSITION_LINE_NUMBER:
                        stack_trace_item_structure.line_number_in_transformed_code = attribute
                    elif position == STACK_TRACE_ITEM_POSITION_FUNCTION_CALL:
                        stack_trace_item_structure.function_call = attribute
                    position += 1
                # To remove useless function call generated by the Luminous-tool
                if stack_trace_item_structure.function_call.startswith(settings.FILE_DESCRIPTOR_NAME) == False:
                    stack_trace_items.append(stack_trace_item_structure)
    return stack_trace_items
def collect_amount_attributes(instruction_groups):
    """Debug helper: dump caller/callee relations and call/return ordering
    for each processed instruction group (Python 2 print statements)."""
    print "processed_instructions_groups \n\n"
    for instructions_group in instruction_groups:
        # Each group pairs a calling function with the functions it calls,
        # plus the execution-order numbers of the call and return lines.
        print "parent: " + str(instructions_group.function_caller)
        print "childs: " + str(instructions_group.function_callees)
        print "order: " + str(instructions_group.function_call_line.execution_order_number)
        print "order: " + str(instructions_group.return_call_line.execution_order_number)
        print "\n" | {
"content_hash": "8e11133bd091368290d0c990916bca7e",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 178,
"avg_line_length": 46.52095808383233,
"alnum_prop": 0.6637919938215987,
"repo_name": "skyylex/Luminous-proof-of-concept",
"id": "73d42b1235f585acbd81eb439283f6a8c1e9b559",
"size": "7769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/execution_tree_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2988"
}
],
"symlink_target": ""
} |
from django import test
from django.core.management import call_command
from django.db import connection
from django.db.migrations.loader import MigrationLoader
class MigrationTestCase(test.TransactionTestCase):
    """
    Test case for a Django database migration.

    Example::

        class TestSomeMigrations(MigrationTestCase):
            migrate_from = '0002_previous_migration'
            migrate_to = '0003_migration_being_tested'

            def test_is_selected_is_flipped(self):
                MyModel = self.apps_before.get_model('myapp', 'MyModel')
                MyModel.objects.create(
                    name='Test1',
                    is_selected=True
                )
                MyModel.objects.create(
                    name='Test2',
                    is_selected=False
                )
                MyModel.objects.create(
                    name='Test3',
                    is_selected=True
                )

                self.migrate()

                MyModel = self.apps_after.get_model('myapp', 'MyModel')
                self.assertEqual(MyModel.objects.filter(is_selected=True).count, 1)
                self.assertEqual(MyModel.objects.filter(is_selected=False).count, 2)
    """

    #: The django app_label for the app you are migrating. This is the same app_label as you
    #: use with ``python manage.py makemigrations <app_label>`` to create the migration.
    app_label = None

    #: Dependencies. A list of ``(app_label, migration_name)`` tuples.
    migrate_dependencies = None

    #: Same as :obj:`~.MigrationTestCase.migrate_dependencies`, but ONLY for :obj:`~.MigrationTestCase.migrate_from`.
    migrate_from_dependencies = None

    #: Same as :obj:`~.MigrationTestCase.migrate_dependencies`, but ONLY for :obj:`~.MigrationTestCase.migrate_to`.
    #: (Bugfix: the original docstring said "migrate_from" here — copy-paste error.)
    migrate_to_dependencies = None

    #: The name of the migration to migrate from.
    #: Can be the full name, or just the number (I.E.: ``0002`` or ``0002_something``.
    migrate_from = None

    #: The name of the migration to migrate to.
    #: Can be the full name, or just the number (I.E.: ``0003`` or ``0003_something``.
    migrate_to = None

    @classmethod
    def _validate_dependency_list(cls, migration_list_name):
        """Raise ValueError if the named dependency list contains our own app_label."""
        migration_list = getattr(cls, migration_list_name)
        if not migration_list:
            return
        for app_label, migration_name in migration_list:
            if app_label == cls.app_label:
                # Bugfix: the original formatted the offending app_label into the
                # "attribute" slot of the message; the attribute name was intended.
                raise ValueError(
                    'The "{}" attribute can not contain any migrations with the '
                    'same app_label as the "app_label" attribute ({})'.format(
                        migration_list_name, cls.app_label
                    )
                )

    @classmethod
    def _validate_class_variables(cls):
        """Validate the required class attributes and all dependency lists."""
        if not cls.app_label or not cls.migrate_from or not cls.migrate_to:
            raise ValueError('app_label, migrate_from and migrate_to must be specified.')
        cls._validate_dependency_list('migrate_from_dependencies')
        cls._validate_dependency_list('migrate_to_dependencies')
        cls._validate_dependency_list('migrate_dependencies')

    @classmethod
    def setUpClass(cls):
        # Bugfix: the original did not call super().setUpClass(), silently
        # skipping TransactionTestCase's own class-level setup.
        super(MigrationTestCase, cls).setUpClass()
        cls._validate_class_variables()

    def _build_migration_list(self, migration_name, migrate_dependencies=None):
        """Return the dependency tuples (if any) followed by this app's target migration."""
        migration_list = []
        if migrate_dependencies:
            migration_list.extend(migrate_dependencies)
        migration_list.append((self.app_label, migration_name))
        return migration_list

    def setUp(self):
        """
        Perform required setup.

        If you override ``setUp()``, you must call ``super().setUp()``!
        """
        self._migrate_from_list = self._build_migration_list(
            self.migrate_from,
            migrate_dependencies=self.migrate_from_dependencies or self.migrate_dependencies)
        self._migrate_to_list = self._build_migration_list(
            self.migrate_to,
            migrate_dependencies=self.migrate_to_dependencies or self.migrate_dependencies)
        # Bugfix: the original re-assigned self._migrate_to_list to
        # [(self.app_label, self.migrate_to)] here, silently discarding
        # migrate_to_dependencies / migrate_dependencies.
        self._apps_before = self._get_apps_for_migration(self._migrate_from_list)
        self._apps_after = None
        # Bring the database to the "from" state before each test.
        for app_label, migration_name in self._migrate_from_list:
            self._run_migrate(app_label, migration_name)

    def _get_apps_for_migration(self, migration_states):
        """Build the historical ``apps`` registry for the given (app_label, name) states."""
        loader = MigrationLoader(connection)
        full_names = []
        for app_label, migration_name in migration_states:
            # 'zero' means "no migrations applied" and has no migration object.
            if migration_name != 'zero':
                migration = loader.get_migration_by_prefix(app_label, migration_name)
                full_names.append((app_label, migration.name))
        state = loader.project_state(full_names)
        return state.apps

    @property
    def apps_before(self):
        """
        Get an ``apps`` object just like the first argument to a Django data migration
        at the state before migration has been run.

        Only available **before** :meth:`.migrate` has been called, or after :meth:`.reverse_migrate`
        has been called.
        """
        if self._apps_after is not None:
            raise AttributeError('apps_before is only available before migrate() has been run, '
                                 'or after reverse_migrate().')
        return self._apps_before

    @property
    def apps_after(self):
        """
        Get an ``apps`` object just like the first argument to a Django data migration
        at the state after migration has been run, and not available after :meth:`.reverse_migrate`
        has been called (unless :meth:`.migrate` is called again).

        Only available **after** :meth:`.migrate` has been called.
        """
        if self._apps_after is None:
            raise AttributeError('apps_after is only available after migrate() has been run. '
                                 'It is not available after reverse_migrate() unless migrate() '
                                 'has been run again.')
        return self._apps_after

    def migrate(self):
        """
        Migrate the database from :obj:`.migrate_from` to :obj:`.migrate_to`.
        """
        if self._apps_after is not None:
            raise Exception('migrate() already run. Can not run migrate() multiple times '
                            'without running reverse_migrate() in between.')
        for app_label, migration_name in self._migrate_to_list:
            self._run_migrate(app_label, migration_name)
        self._apps_after = self._get_apps_for_migration(self._migrate_to_list)

    def reverse_migrate(self):
        """
        Migrate the database from :obj:`.migrate_to` to :obj:`.migrate_from`.

        You must call :meth:`.migrate` before calling this.
        """
        if self._apps_after is None:
            raise Exception('You must run migrate() before you can run reverse_migrate().')
        for app_label, migration_name in self._migrate_from_list:
            self._run_migrate(app_label, migration_name)
        self._apps_after = None

    def get_migrate_command_kwargs(self):
        """
        Get kwargs for the ``migrate`` management command.

        The defaults are sane, but you may want to override this and change
        the ``verbosity`` argument for debugging purposes.
        """
        # NOTE(review): 'no_initial_data' was removed in newer Django versions;
        # kept for compatibility with the Django version this file targets.
        return {'verbosity': 0,
                'no_initial_data': True,
                'interactive': False}

    def _run_migrate(self, app_label, migration_name, fake=False):
        """Run the ``migrate`` management command for a single target migration."""
        kwargs = self.get_migrate_command_kwargs()
        kwargs['fake'] = fake
        args = ('migrate', app_label, migration_name)
        call_command(*args, **kwargs)
| {
"content_hash": "e6bb5f06c798f579ee417a12594f0932",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 117,
"avg_line_length": 40.63157894736842,
"alnum_prop": 0.6040155440414507,
"repo_name": "appressoas/ievv_opensource",
"id": "262a0672972e327d6c7b898583d36a15bae0673e",
"size": "7720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/utils/testhelpers/testmigrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
} |
from smtplib import SMTP as smtp
from os import listdir
from os import remove
def is_listed(email):
    """Return True if *email* appears (one address per line) in the membership file.

    Uses a ``with`` block so the file is closed even if reading raises, and
    returns as soon as a match is found instead of scanning the whole file.
    """
    with open('/foodcoop/members', 'r') as fp:
        for line in fp:
            if email == line.strip():
                return True
    return False
def email_re(list_of_words):
    """Extract an email address from a list of header words.

    A word wrapped in angle brackets (``<addr>``) has the brackets stripped;
    otherwise any word containing ``@`` is taken verbatim. The LAST matching
    word wins. Returns the sentinel string ``'NO'`` when nothing matches.
    """
    email = 'NO'
    for raw_word in list_of_words:
        # Bugfix: the original called raw_word.strip() and discarded the
        # result (strings are immutable); bind it so the strip takes effect.
        word = raw_word.strip()
        if word.startswith('<'):
            email = word.replace('<', '').replace('>', '')
        elif '@' in word:
            email = word
    return email
def read_emails():
    """Scan the Maildir 'new' folder for food orders from listed members.

    For each message whose Subject is 'food' and whose From address is in
    the membership file, the message body is appended to the orders file,
    a confirmation email is sent, and the message file is deleted.
    (Python 2 code: note the bare ``print`` statement below.)
    """
    # NOTE(review): these flags are initialised once, OUTSIDE the per-file
    # loop — a match in one file carries over to all later files. Looks like
    # a bug; confirm intended behaviour before changing.
    order = False
    islisted = False
    email = ''
    order_list = []
    for eachfile in listdir('/home/my_username/Maildir/new'):
        file_name = '/home/my_username/Maildir/new/' + eachfile
        fp = open(file_name, 'r')
        # First pass: scan the headers for Subject/From.
        for eachline in fp:
            eachline = eachline.strip() + ' 0 1' #to stop index errors
            eachline = eachline.split()
            if eachline[0].strip() == 'Subject:' and eachline[1].strip().lower() == 'food': order = True
            if eachline[0].strip() == 'From:' and is_listed(email_re(eachline)):
                islisted = True
                email = email_re(eachline)
        is_message = False
        fp.seek(0)
        # Second pass: everything after the first blank line is the body.
        for eachline in fp:
            eachline = eachline.strip()
            if len(eachline) == 0:
                is_message = True
            if is_message:
                order_list.append(eachline)
        fp.close()
        print order_list
        if order and islisted:
            # Append the sender and body to the shared orders file.
            fp = open('/home/foodcoop/orders','a')
            fp.write(email + '\n')
            for eachline in order_list:
                if eachline: fp.write(eachline + '\n')
            fp.write('\n\n')
            fp.close()
            confirm_order(email)
            remove(file_name)
def confirm_order(email='my_email_address@gmail.com'):
    """Send an 'order complete' confirmation email via the local SMTP server.

    Any per-recipient failures reported by ``sendmail`` are appended to a
    local 'errors' file.
    """
    NAME = email
    FROM = 'customer_service@foodcoop.com'
    TO = NAME
    BODY = '\r\norder complete!\r\n'
    SMTPSERVER = 'localhost'
    # RFC 822-style message: headers, blank line, then body.
    message = 'From: ' + FROM + '\r\nTo: ' + TO + '\r\nSubject: Food order\r\n\r\n' + BODY
    sendserver = smtp(SMTPSERVER)
    # sendmail() returns a dict of {recipient: error} for refused recipients.
    errors = sendserver.sendmail(FROM, TO, message)
    sendserver.quit()
    if len(errors) != 0:
        fp = open('errors', 'a')
        # Iterating the dict yields the refused recipient addresses (keys).
        for eachline in errors:
            fp.write(eachline+'\n')
        fp.write('\n\n')
        fp.close()
# Script entry point: scan the Maildir and process any new order emails.
read_emails()
| {
"content_hash": "df404e8ad186400b4304e8c8e5fd5a43",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 104,
"avg_line_length": 29.27710843373494,
"alnum_prop": 0.5452674897119342,
"repo_name": "rupertsmall/food_coop",
"id": "85ea6be225598d2921845b8a88ad542d8e67b73a",
"size": "2430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customer_orders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""Calculate the number of qubits of a DAG circuit."""
from qiskit.transpiler.basepasses import AnalysisPass
class NumQubits(AnalysisPass):
    """Analysis pass that records a DAG circuit's qubit count.

    After running, ``property_set['num_qubits']`` holds the count as an
    integer.
    """

    def run(self, dag):
        """Store the qubit count of ``dag`` in the property set."""
        qubit_count = dag.num_qubits()
        self.property_set["num_qubits"] = qubit_count
| {
"content_hash": "e922f20c5b651a7d98e9104c8d3b8482",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6650485436893204,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "ed3fbe41f086daa9edca858d68a4d62cf183bd3c",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/transpiler/passes/analysis/num_qubits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 808d8916868
Revises: 33fec8aa1b6
Create Date: 2016-04-08 21:07:08.499000
"""
# revision identifiers, used by Alembic.
revision = '808d8916868'
down_revision = '33fec8aa1b6'
from alembic import op
import sqlalchemy as sa
import geoalchemy2
from sqlalchemy_utils import URLType
from sqlalchemy.dialects import postgresql
def upgrade():
    """Apply the schema change: add grade/elevation columns to Segment.

    NOTE(review): the triple-quoted blocks below are auto-generated Alembic
    operations that were deliberately disabled by turning them into no-op
    string-literal statements; they are kept verbatim.
    """
    ### commands auto generated by Alembic - please adjust! ###
    """op.create_table('Stream_Act',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('ath_id', sa.Integer(), nullable=True),
    sa.Column('act_id', sa.Integer(), nullable=True),
    sa.Column('last_updated_datetime_utc', sa.DateTime(), nullable=True),
    sa.Column('act_name', sa.String(length=200), nullable=True),
    sa.Column('linestring', geoalchemy2.types.Geometry(geometry_type='LINESTRING'), nullable=True),
    sa.Column('multipoint', geoalchemy2.types.Geometry(geometry_type='MULTIPOINT'), nullable=True),
    sa.ForeignKeyConstraint(['act_id'], ['Activity.act_id'], ),
    sa.ForeignKeyConstraint(['ath_id'], ['Athlete.ath_id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_Stream_Act_act_id'), 'Stream_Act', ['act_id'], unique=False)
    op.create_index(op.f('ix_Stream_Act_ath_id'), 'Stream_Act', ['ath_id'], unique=False)
    op.create_index(op.f('ix_Stream_Act_linestring'), 'Stream_Act', ['linestring'], unique=False)
    op.create_index(op.f('ix_Stream_Act_multipoint'), 'Stream_Act', ['multipoint'], unique=False)
    op.drop_table('Stream_HeatPoint')
    op.drop_table('Stream_LineString')
    op.drop_table('spatial_ref_sys')"""
    # The only active operations: three new nullable float columns.
    op.add_column('Segment', sa.Column('avg_grade', sa.Float(precision=4), nullable=True))
    op.add_column('Segment', sa.Column('max_grade', sa.Float(precision=4), nullable=True))
    op.add_column('Segment', sa.Column('total_elevation_gain', sa.Float(precision=4), nullable=True))
    """op.drop_index('idx_Segment_end_point', table_name='Segment')
    op.drop_index('idx_Segment_start_point', table_name='Segment')
    op.drop_index('ix_Segment_act_type', table_name='Segment')
    op.drop_index('ix_Segment_cat', table_name='Segment')
    op.drop_index('ix_Segment_date_created', table_name='Segment')
    op.drop_index('ix_Segment_distance', table_name='Segment')
    op.drop_index('ix_Segment_elev_gain', table_name='Segment')
    op.drop_index('ix_Segment_end_point', table_name='Segment')
    op.drop_index('ix_Segment_start_point', table_name='Segment')
    op.create_index(op.f('ix_Stream_point'), 'Stream', ['point'], unique=False)
    op.drop_index('idx_Stream_point', table_name='Stream')"""
    ### end Alembic commands ###
def downgrade():
    """Revert the schema change: drop the grade/elevation columns from Segment.

    NOTE(review): the triple-quoted blocks below are auto-generated Alembic
    operations that were deliberately disabled by turning them into no-op
    string-literal statements; they are kept verbatim.
    """
    ### commands auto generated by Alembic - please adjust! ###
    """op.create_index('idx_Stream_point', 'Stream', ['point'], unique=False)
    op.drop_index(op.f('ix_Stream_point'), table_name='Stream')
    op.create_index('ix_Segment_start_point', 'Segment', ['start_point'], unique=False)
    op.create_index('ix_Segment_end_point', 'Segment', ['end_point'], unique=False)
    op.create_index('ix_Segment_elev_gain', 'Segment', ['elev_gain'], unique=False)
    op.create_index('ix_Segment_distance', 'Segment', ['distance'], unique=False)
    op.create_index('ix_Segment_date_created', 'Segment', ['date_created'], unique=False)
    op.create_index('ix_Segment_cat', 'Segment', ['cat'], unique=False)
    op.create_index('ix_Segment_act_type', 'Segment', ['act_type'], unique=False)
    op.create_index('idx_Segment_start_point', 'Segment', ['start_point'], unique=False)
    op.create_index('idx_Segment_end_point', 'Segment', ['end_point'], unique=False)"""
    # The only active operations: drop the three columns added by upgrade().
    op.drop_column('Segment', 'total_elevation_gain')
    op.drop_column('Segment', 'max_grade')
    op.drop_column('Segment', 'avg_grade')
    """op.create_table('spatial_ref_sys',
    sa.Column('srid', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('auth_name', sa.VARCHAR(length=256), autoincrement=False, nullable=True),
    sa.Column('auth_srid', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('srtext', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
    sa.Column('proj4text', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint('srid', name=u'spatial_ref_sys_pkey')
    )
    op.create_table('Stream_LineString',
    sa.Column('ath_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('act_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('act_name', sa.VARCHAR(length=200), autoincrement=False, nullable=True),
    sa.Column('act_type', sa.VARCHAR(length=20), autoincrement=False, nullable=True),
    sa.Column('linestring', geoalchemy2.types.Geometry(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint('act_id', name=u'act_id_pk')
    )
    op.create_table('Stream_HeatPoint',
    sa.Column('ath_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('point', geoalchemy2.types.Geometry(), autoincrement=False, nullable=True),
    sa.Column('density', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
    sa.Column('speed', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
    sa.Column('grade', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
    sa.Column('power', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
    sa.Column('hr', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
    sa.Column('cadence', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
    )
    op.drop_index(op.f('ix_Stream_Act_multipoint'), table_name='Stream_Act')
    op.drop_index(op.f('ix_Stream_Act_linestring'), table_name='Stream_Act')
    op.drop_index(op.f('ix_Stream_Act_ath_id'), table_name='Stream_Act')
    op.drop_index(op.f('ix_Stream_Act_act_id'), table_name='Stream_Act')
    op.drop_table('Stream_Act')"""
    ### end Alembic commands ###
| {
"content_hash": "2de4c0a276e409728875467a11049dc8",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 104,
"avg_line_length": 57.542857142857144,
"alnum_prop": 0.6931479642502483,
"repo_name": "ryanbaumann/athletedataviz",
"id": "9fd5281228ff3d7f13479ee9412e968f8e2ea3fd",
"size": "6042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/808d8916868_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "204523"
},
{
"name": "HTML",
"bytes": "67946"
},
{
"name": "JavaScript",
"bytes": "45354"
},
{
"name": "Mako",
"bytes": "470"
},
{
"name": "Python",
"bytes": "173041"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
import urllib2
import cookielib
import requests
import re
import json
import codecs
import os
import datetime
import random
import time
import mysql.connector
import variable
import facebook_variable
class User(object):
    """Container for a scraped Medium user's data, serializable to JSON."""

    def __init__(self):
        super(User, self).__init__()
        # Empty skeleton; the scraper fills these fields in later.
        self.data = {
            'profile': {},
            'following': [],
            'followers': [],
            'latest': [],
            'recommends': [],
            'highlights': {},
            'responses': [],
        }

    def getstr(self):
        """Return the collected data pretty-printed as a JSON string."""
        return json.dumps(self.data, indent=4)
class Story(object):
    """Container for a scraped Medium story's metadata, serializable to JSON."""

    def __init__(self):
        super(Story, self).__init__()
        # Defaults describe an as-yet-unscraped story; 'success' is flipped
        # to 0 by the scraper when extraction fails.
        self.data = {
            'story_id': "",
            'author': "",
            'timestamp': 0,
            'published_date': "",
            'collection': {},
            'tags': [],
            'recommends': 0,
            'responses': 0,
            'response_to': "",
            'success': 1,
        }

    def getstr(self):
        """Return the collected metadata pretty-printed as a JSON string."""
        return json.dumps(self.data, indent=4)
class FBUser(object):
    """Container for a scraped Facebook profile, serializable to JSON."""

    def __init__(self):
        super(FBUser, self).__init__()
        # Empty skeleton; 'Friends' stays None until a friend count is found.
        self.data = {
            'user_id': '',
            'URL': '',
            'Name': '',
            'Friends': None,
            'Current City': '',
            'Hometown': '',
            'Birthday': '',
            'Gender': '',
            'Languages': '',
        }

    def getstr(self):
        """Return the collected profile pretty-printed as a JSON string."""
        return json.dumps(self.data, indent=4)
def mark_failed_post(post):
    """Flag a post as failed in the MySQL ``posts`` table.

    Uses a parameterized query: the original interpolated the scraped *post*
    id directly into the SQL string, which breaks on quotes and is
    injection-prone. try/finally ensures the connection is closed even if
    the query fails.
    """
    conn = mysql.connector.connect(host=variable.host, port=3306, user=variable.username,
                                   password=variable.password, database='Medium', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("UPDATE posts SET failed=1 WHERE post_id=%s", (post,))
        cur.close()
        conn.commit()
    finally:
        conn.close()
def get_story(post_id):
    """Scrape a Medium post's metadata page and return a populated Story.

    Each field is regex-extracted from the raw HTML/embedded JSON. On any
    failure the post is marked failed in the database, ``success`` is set
    to 0, and the partially-filled Story is returned.
    """
    url = 'https://medium.com/posts/' + post_id
    story = Story()
    # Fresh cookie jar + opener per request; browser-like User-Agent to
    # avoid being served a bot page.
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    try:
        response = opener.open(req, timeout=10)
    except urllib2.URLError:
        story.data['success'] = 0
        print('----------timeout')
        mark_failed_post(post_id)
        return story
    data = response.read()
    story_id = re.findall('data-post-id="(.*?)" data-is-icon', data)
    if not story_id:
        story.data['success'] = 0
        print('----------fail to get story_id')
        mark_failed_post(post_id)
        return story
    else:
        story.data['story_id'] = story_id[0]
    author = re.findall('"username":"(.*?)","createdAt"', data)
    if not author:
        story.data['success'] = 0
        print('----------fail to get author')
        mark_failed_post(post_id)
        return story
    else:
        story.data['author'] = author[0]
    timestamp = re.findall('"firstPublishedAt":(.*?),"latestPublishedAt"', data)
    if not timestamp:
        story.data['success'] = 0
        print('----------fail to get timestamp')
        mark_failed_post(post_id)
        return story
    else:
        story.data['timestamp'] = float(timestamp[0])
        # Medium timestamps are in milliseconds; convert to an ISO date.
        story.data['published_date'] = datetime.date.fromtimestamp(story.data['timestamp']/1000.0).isoformat()
    # The collection is optional; strip bulky presentation-only sub-objects.
    collection = re.findall('"approvedHomeCollection":(.*?),"newsletterId"', data)
    if not collection:
        story.data['collection'] = {}
    else:
        story.data['collection'] = json.loads(collection[0])
        story.data['collection'].pop("sections", None)
        story.data['collection'].pop("virtuals", None)
        story.data['collection'].pop("colorPalette", None)
        story.data['collection'].pop("highlightSpectrum", None)
        story.data['collection'].pop("defaultBackgroundSpectrum", None)
        story.data['collection'].pop("navItems", None)
        story.data['collection'].pop("ampLogo", None)
    tags = re.findall('false,"tags":(.*?),"socialRecommendsCount"', data)
    if not tags:
        story.data['success'] = 0
        print('----------fail to get tags')
        mark_failed_post(post_id)
        return story
    else:
        story.data['tags'] = json.loads(tags[0])
    recommends = re.findall('"recommends":(.*?),"socialRecommends"', data)
    if not recommends:
        story.data['success'] = 0
        print('----------fail to get recommends')
        mark_failed_post(post_id)
        return story
    else:
        # NOTE(review): eval() on scraped page content is dangerous; should
        # be int() or ast.literal_eval(). Flagged, not changed here.
        story.data['recommends'] = eval(recommends[0])
    responses = re.findall('"responsesCreatedCount":(.*?),"links"', data)
    if not responses:
        story.data['success'] = 0
        print('----------fail to get responses')
        mark_failed_post(post_id)
        return story
    else:
        # NOTE(review): same eval() concern as above.
        story.data['responses'] = eval(responses[0])
    response_to = re.findall('"inResponseToPostId":"(.*?)","inResponseToPost"', data)
    if not response_to:
        story.data['response_to'] = ''
    else:
        story.data['response_to'] = response_to[0]
    return story
def get_following(user_id):
    """Return the usernames a Medium user follows.

    Walks the paginated /following API: each page yields usernames and an
    optional 'to' cursor; the loop repeats until no cursor remains. A set
    de-duplicates across pages.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/following'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    following = re.findall('"username":"(.*?)","createdAt"', data)
    following_set = set(following)
    # 'to' is the pagination cursor embedded in the response payload.
    to = re.findall('"to":"(.*?)"}}},"v"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/following?to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        following = re.findall('"username":"(.*?)","createdAt"', data)
        following_set.update(following)
        to = re.findall('"to":"(.*?)"}}},"v"', data)
    return list(following_set)
def get_followers(user_id):
    """Return the usernames of a Medium user's followers.

    Same paginated-cursor walk as get_following(), against the /followers
    endpoint.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/followers'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    followers = re.findall('"username":"(.*?)","createdAt"', data)
    followers_set = set(followers)
    # 'to' is the pagination cursor; loop until the API stops returning one.
    to = re.findall('"to":"(.*?)"}}},"v"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/followers?to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        followers = re.findall('"username":"(.*?)","createdAt"', data)
        followers_set.update(followers)
        to = re.findall('"to":"(.*?)"}}},"v"', data)
    return list(followers_set)
def get_latest(user_id):
    """Return post IDs from a Medium user's 'latest' profile stream.

    Paginated-cursor walk over /profile/stream?source=latest.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=latest'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    latest = re.findall('"postId":"(.*?)"},"randomId"', data)
    latest_set = set(latest)
    # 'to' is the pagination cursor; loop until the API stops returning one.
    to = re.findall('"to":"(.*?)","source":"latest"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=latest&to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        latest = re.findall('"postId":"(.*?)"},"randomId"', data)
        latest_set.update(latest)
        to = re.findall('"to":"(.*?)","source":"latest"', data)
    return list(latest_set)
def get_recommends(user_id):
    """Return post IDs a Medium user has recommended.

    Paginated-cursor walk over /profile/stream?source=has-recommended.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=has-recommended'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    recommends = re.findall('w":{"postId":"(.*?)"},"randomId"', data)
    recommends_set = set(recommends)
    # 'to' is the pagination cursor; loop until the API stops returning one.
    to = re.findall('"to":"(.*?)","source":"has-recommended"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=has-recommended&to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        recommends = re.findall('w":{"postId":"(.*?)"},"randomId"', data)
        recommends_set.update(recommends)
        to = re.findall('"to":"(.*?)","source":"has-recommended"', data)
    return list(recommends_set)
def get_highlights(user_id):
    """Return post IDs a Medium user has highlighted (quoted).

    Paginated-cursor walk over /profile/stream?source=quotes.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=quotes'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    highlights = re.findall('","postId":"(.*?)","userId":"', data)
    highlights_set = set(highlights)
    # 'to' is the pagination cursor; loop until the API stops returning one.
    to = re.findall('"to":"(.*?)","source":"quotes"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=quotes&to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        highlights = re.findall('","postId":"(.*?)","userId":"', data)
        highlights_set.update(highlights)
        to = re.findall('"to":"(.*?)","source":"quotes"', data)
    return list(highlights_set)
def get_responses(user_id):
    """Return post IDs of a Medium user's responses (comments-as-posts).

    Paginated-cursor walk over /profile/stream?source=responses.
    """
    url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=responses'
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    responses = re.findall('w":{"postId":"(.*?)"},"randomId"', data)
    responses_set = set(responses)
    # 'to' is the pagination cursor; loop until the API stops returning one.
    to = re.findall('"to":"(.*?)","source":"responses"', data)
    while to:
        url = 'https://medium.com/_/api/users/' + user_id + '/profile/stream?source=responses&to=' + to[0]
        cj = cookielib.MozillaCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(url)
        req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
        response = opener.open(req, timeout=10)
        data = response.read()
        responses = re.findall('w":{"postId":"(.*?)"},"randomId"', data)
        responses_set.update(responses)
        to = re.findall('"to":"(.*?)","source":"responses"', data)
    return list(responses_set)
def mark_visited(username):
    """Mark a Medium user as visited in the ``users`` table.

    Uses a parameterized query (the original interpolated *username* into
    the SQL string — injection-prone and broken by quotes); try/finally
    ensures the connection is closed even on error.
    """
    conn = mysql.connector.connect(host=variable.host, port=3306, user=variable.username,
                                   password=variable.password, database='Medium', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("UPDATE users SET visited=1 WHERE username=%s", (username,))
        cur.close()
        conn.commit()
    finally:
        conn.close()
def mark_failed(username):
    """Mark a Medium user as failed in the ``users`` table.

    Uses a parameterized query (the original interpolated *username* into
    the SQL string — injection-prone); try/finally ensures the connection
    is closed even on error.
    """
    print('-----mark failed')
    conn = mysql.connector.connect(host=variable.host, port=3306, user=variable.username,
                                   password=variable.password, database='Medium', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("UPDATE users SET failed=1 WHERE username=%s", (username,))
        cur.close()
        conn.commit()
    finally:
        conn.close()
def post_exist(post):
    """Atomically register *post*; return True if it was already recorded.

    Relies on the ``posts`` primary key: the INSERT raises when post_id
    already exists. Changes vs. the original: parameterized query instead
    of string interpolation, and the bare ``except:`` narrowed to
    mysql.connector.Error so programming errors are not swallowed. Any DB
    error still maps to True ("treat as existing"), matching the original
    control flow.
    """
    conn = mysql.connector.connect(host=variable.host, port=3306, user=variable.username,
                                   password=variable.password, database='Medium', charset='utf8')
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO posts VALUE(%s, %s, %s)", (post, 1, 0))
        conn.commit()
        return False
    except mysql.connector.Error:
        # Typically IntegrityError on a duplicate post_id.
        return True
    finally:
        cur.close()
        conn.close()
def get_posts(user):
    """Scrape and persist every post referenced by a scraped Medium user.

    Unseen posts (per post_exist's DB check) are scraped with get_story()
    and written as JSON files under ./Posts/. Parents of the user's
    responses are scraped too.
    """
    # NOTE(review): 'highlights' is initialised as a dict in User.__init__
    # but concatenated here as if it were a list — confirm which is correct.
    post_list = user.data['latest'] + user.data['recommends'] + user.data['highlights'] + user.data['responses']
    post_list = list(set(post_list))
    for post in post_list:
        if not post_exist(post):
            out = codecs.open("./Posts/%s.json" % post, 'w', 'utf-8')
            out.write(get_story(post).getstr())
            out.close()
    # Also fetch the post each response replies to, if any.
    for post in user.data['responses']:
        # NOTE(review): get_story() is called here just to read response_to,
        # then again below to write the parent — two full fetches per response.
        post = get_story(post).data['response_to']
        if post and (not post_exist(post)):
            out = codecs.open("./Posts/%s.json" % post, 'w', 'utf-8')
            out.write(get_story(post).getstr())
            out.close()
def get_twitter_profile(username, twitter_id):
    """Scrape a Twitter profile's embedded JSON and save it under ./Twitter/.

    The page's 'json-data' input element is regex-extracted, decoded, and
    stripped of bulky non-profile sections before being written to
    ./Twitter/<username>_t.json.
    """
    url = "https://twitter.com/" + str(twitter_id) + "?lang=en"
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    response = opener.open(req, timeout=10)
    data = response.read()
    profile_data = re.findall('class="json-data" value="(.*?)">', data)
    # NOTE(review): this replace() is a no-op as written; it likely decoded
    # HTML '&quot;' entities in the original source — confirm upstream.
    profile = json.loads(profile_data[0].replace('"', '"'))
    # Drop bulky, non-profile sections of the embedded payload.
    profile.pop("promptbirdData", None)
    profile.pop("wtfOptions", None)
    profile.pop("typeaheadData", None)
    profile.pop("dm", None)
    profile.pop("initialState", None)
    profile.pop("activeHashflags", None)
    profile.pop("keyboardShortcuts", None)
    profile.pop("deciders", None)
    out = codecs.open("./Twitter/%s_t.json" % username, 'w', 'utf-8')
    out.write(json.dumps(profile, indent=4))
    out.close()
def mark_visited_twitter(username, twitter_id):
    """Insert a (username, twitter_id) row marking the Twitter account as visited."""
    conn = mysql.connector.connect(host=variable.host, port=3306,
                                   user=variable.username,
                                   password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    # Parameterized: both values originate from scraped pages.
    cur.execute("INSERT INTO twitter VALUE(%s, %s, %s, %s)",
                (username, twitter_id, 1, 0))
    cur.close()
    conn.commit()
    conn.close()
def mark_failed_twitter(username, twitter_id):
    """Flag the (username, twitter_id) row as failed after a scrape error."""
    conn = mysql.connector.connect(host=variable.host, port=3306,
                                   user=variable.username,
                                   password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    # Parameterized: both values originate from scraped pages.
    cur.execute("UPDATE twitter SET failed=1 WHERE username=%s and twitter_id=%s",
                (username, twitter_id))
    cur.close()
    conn.commit()
    conn.close()
def get_facebook_profile(username, user_id):
    """Log into Facebook with the crawler account and scrape the public
    "about" fields of *user_id*'s profile into ./Facebook/<username>_fb.json.

    NOTE(review): the original file's indentation was lost; the nesting of
    the per-field if/else blocks below is reconstructed (each `else` is
    assumed to attach to the outermost "field present?" check) — confirm
    against the upstream repo.
    """
    print(user_id)
    user = FBUser()
    user.data['user_id'] = user_id
    # Authenticate through the mobile login form (no JS required).
    login_url = 'https://m.facebook.com/login'
    s = requests.session()
    login_data = {
        'email': facebook_variable.username,
        'pass': facebook_variable.password
    }
    s.post(login_url, login_data)
    time.sleep(1)
    # Resolve the numeric/user id to the vanity URL via the noscript redirect.
    url = 'https://facebook.com/' + user_id
    response = s.get(url)
    data = response.content
    URL = re.findall('URL=/(.*?)\?_fb_noscript=1', data)
    if URL:
        user.data['URL'] = URL[0]
    else:
        user.data['URL'] = user_id
    print(user.data['URL'])
    time.sleep(1)
    # Fetch the mobile profile page, which is plain HTML and easy to regex.
    url = 'https://m.facebook.com/' + user.data['URL']
    response = s.get(url)
    data = response.content
    name = re.findall('<title>(.*?)</title>', data)
    if name:
        user.data['Name'] = name[0]
    else:
        print('-----no Name to show')
    # "Page Not Found" title means the profile is gone or blocked us.
    if user.data['Name'] == 'Page Not Found':
        print('-----blocked')
        mark_failed_facebook(username, user_id)
        return
    friends = re.findall('See All Friends \((.*?)\)</a>', data)
    if friends:
        user.data['Friends'] = int(friends[0])
    else:
        print('-----no Friends to show')
    # Each field below is narrowed in three regex steps: section -> link -> text.
    current_city = re.findall('Current City<(.*?)a>', data)
    if current_city:
        current_city = re.findall('<a href="/(.*?)/', current_city[0])
        if current_city:
            current_city = re.findall('>(.*?)<', current_city[0])
            if current_city:
                user.data['Current City'] = current_city[0]
    else:
        print('-----no Current City to show')
    hometown = re.findall('Hometown<(.*?)a>', data)
    if hometown:
        hometown = re.findall('<a href="/(.*?)/', hometown[0])
        if hometown:
            hometown = re.findall('>(.*?)<', hometown[0])
            if hometown:
                user.data['Hometown'] = hometown[0]
    else:
        print('-----no Hometown to show')
    birthday = re.findall('Birthday</span></div></td><td(.*?)div>', data)
    if birthday:
        birthday = re.findall('><(.*?)/', birthday[0])
        if birthday:
            birthday = re.findall('>(.*?)<', birthday[0])
            if birthday:
                user.data['Birthday'] = birthday[0]
    else:
        # Fall back to the year-only variant of the field.
        birthday = re.findall('Birth Year</span></div></td><td(.*?)div>', data)
        if birthday:
            birthday = re.findall('><(.*?)/', birthday[0])
            if birthday:
                birthday = re.findall('>(.*?)<', birthday[0])
                if birthday:
                    user.data['Birthday'] = birthday[0]
        else:
            print('-----no Birthday to show')
    gender = re.findall('Gender</span></div></td><td(.*?)div>', data)
    if gender:
        gender = re.findall('><(.*?)/', gender[0])
        if gender:
            gender = re.findall('>(.*?)<', gender[0])
            if gender:
                user.data['Gender'] = gender[0]
    else:
        print('-----no Gender to show')
    languages = re.findall('Languages</span></div></td><td(.*?)div>', data)
    if languages:
        languages = re.findall('><(.*?)/', languages[0])
        if languages:
            languages = re.findall('>(.*?)<', languages[0])
            if languages:
                user.data['Languages'] = languages[0]
    else:
        print('-----no Languages to show')
    out = codecs.open("./Facebook/%s_fb.json" % username, 'w', 'utf-8')
    out.write(user.getstr())
    out.close()
def mark_visited_facebook(username, facebook_id):
    """Insert a (username, facebook_id) row marking the Facebook account as visited."""
    conn = mysql.connector.connect(host=variable.host, port=3306,
                                   user=variable.username,
                                   password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    # Parameterized: both values originate from scraped pages.
    cur.execute("INSERT INTO facebook VALUE(%s, %s, %s, %s)",
                (username, facebook_id, 1, 0))
    cur.close()
    conn.commit()
    conn.close()
def mark_failed_facebook(username, facebook_id):
    """Flag the (username, facebook_id) row as failed after a scrape error."""
    conn = mysql.connector.connect(host=variable.host, port=3306,
                                   user=variable.username,
                                   password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    # Parameterized: both values originate from scraped pages.
    cur.execute("UPDATE facebook SET failed=1 WHERE username=%s and facebook_id=%s",
                (username, facebook_id))
    cur.close()
    conn.commit()
    conn.close()
def get_user(username):
    """Crawl one Medium user end-to-end.

    Fetches the profile page, the social graph (following/followers — which
    also seeds the shared crawl queue and the `users` table), the four post
    lists, then dumps the user JSON and kicks off post/Twitter/Facebook
    scraping. Any fetch failure marks the user failed and aborts.
    """
    # Output directories for the JSON dumps (idempotent).
    if not os.path.exists('./Users'):
        os.mkdir('./Users')
    if not os.path.exists('./Posts'):
        os.mkdir('./Posts')
    if not os.path.exists('./Twitter'):
        os.mkdir('./Twitter')
    if not os.path.exists('./Facebook'):
        os.mkdir('./Facebook')
    print(username)
    user = User()
    url = 'https://medium.com/@' + username
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    # Spoof a desktop-browser UA so Medium serves the full page.
    req.add_header("User-agent", 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36')
    try:
        response = opener.open(req, timeout=10)
    except:
        print('-----fail to get data')
        mark_failed(username)
        return
    data = response.read()
    # The profile is embedded in the page as a JSON fragment; the closing
    # '"UserMeta"}' is the regex sentinel and is re-appended before parsing.
    profile = re.findall('"userMeta":(.*?)"UserMeta"}', data)
    if not profile:
        print('-----fail to get profile')
        mark_failed(username)
        return
    else:
        user.data['profile'] = json.loads(profile[0]+'"UserMeta"}')
        print('-----profile')
    user_id = user.data['profile']['user']['userId']
    try:
        user.data['following'] = get_following(user_id)
        print('-----following')
    except:
        print('-----fail to get following')
        mark_failed(username)
        return
    try:
        user.data['followers'] = get_followers(user_id)
        print('-----followers')
    except:
        print('-----fail to get followers')
        mark_failed(username)
        return
    # Enqueue every newly seen account; the INSERT fails on duplicates,
    # which is how already-known users are skipped.
    conn = mysql.connector.connect(host=variable.host, port=3306, user=variable.username, password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    for following in user.data['following']:
        try:
            sql = "INSERT INTO users VALUE('%s', %s, %s, '%s')" % (following, 0, 0, variable.ip)
            cur.execute(sql)
            conn.commit()
            variable.queue.append(following)
        except:
            continue
    for follower in user.data['followers']:
        try:
            sql = "INSERT INTO users VALUE('%s', %s, %s, '%s')" % (follower, 0, 0, variable.ip)
            cur.execute(sql)
            conn.commit()
            variable.queue.append(follower)
        except:
            continue
    cur.close()
    conn.close()
    try:
        user.data['latest'] = get_latest(user_id)
        print('-----latest')
    except:
        print('-----fail to get latest')
        mark_failed(username)
        return
    try:
        user.data['recommends'] = get_recommends(user_id)
        print('-----recommends')
    except:
        print('-----fail to get recommends')
        mark_failed(username)
        return
    try:
        user.data['highlights'] = get_highlights(user_id)
        print('-----highlights')
    except:
        print('-----fail to get highlights')
        mark_failed(username)
        return
    try:
        user.data['responses'] = get_responses(user_id)
        print('-----responses')
    except:
        print('-----fail to get responses')
        mark_failed(username)
        return
    out = codecs.open("./Users/%s.json" % username, 'w', 'utf-8')
    out.write(user.getstr())
    out.close()
    # Post download is best-effort: a failure here does not mark the user failed.
    try:
        get_posts(user)
        print('-----posts')
    except:
        print('-----fail to get posts')
    twitter_id = user.data['profile']['user']['twitterScreenName']
    print('-----twitter: ' + twitter_id)
    if twitter_id:
        try:
            mark_visited_twitter(username, twitter_id)
            get_twitter_profile(username, twitter_id)
            print('-----twitter')
        except:
            mark_failed_twitter(username, twitter_id)
            print('-----fail to get Twitter')
    facebook_id = user.data['profile']['user']['facebookAccountId']
    print('-----facebook: ' + facebook_id)
    if facebook_id:
        try:
            mark_visited_facebook(username, facebook_id)
            get_facebook_profile(username, facebook_id)
            print('-----facebook')
        except:
            mark_failed_facebook(username, facebook_id)
            print('-----fail to get Facebook')
    print("-----%s obtained" % username)
def get_queue(ip):
    """Return the shuffled list of usernames assigned to *ip* that are still
    unvisited and not failed."""
    conn = mysql.connector.connect(host=variable.host, port=3306,
                                   user=variable.username,
                                   password=variable.password,
                                   database='Medium', charset='utf8')
    cur = conn.cursor()
    # Parameterized query instead of %-formatting the ip into the SQL.
    cur.execute("SELECT username FROM users WHERE visited=0 and failed=0 and ip=%s",
                (ip,))
    result = cur.fetchall()
    cur.close()
    conn.commit()
    conn.close()
    queue = [row[0] for row in result]
    # A single Fisher-Yates shuffle already yields a uniform permutation;
    # the original shuffled five times, which added nothing.
    random.shuffle(queue)
    return queue
def bfs():
    """Drain the shared crawl queue breadth-first, marking each username as
    visited before attempting to crawl it."""
    while variable.queue:
        username = variable.queue.pop(0)
        mark_visited(username)
        try:
            get_user(username)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still stop the crawler.
            print('fail to get user')
if __name__ == '__main__':
    # Entry point: seed the in-memory queue from this crawler's DB shard,
    # then crawl until the queue is drained.
    variable.queue = get_queue(variable.ip)
    bfs()
| {
"content_hash": "947eb869d2d2009a46e90b8c6085f01e",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 152,
"avg_line_length": 35.568,
"alnum_prop": 0.5761358524516419,
"repo_name": "lifei96/Medium_Crawler",
"id": "00b2fa7b3da5aec216accfeabee057c850c8621a",
"size": "26701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "User_Crawler/medium_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54689"
},
{
"name": "Shell",
"bytes": "97"
}
],
"symlink_target": ""
} |
import re
import json
from urlparse import urlparse
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
from scrapy.utils.response import get_base_url
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors.sgml import SgmlLinkExtractor as sle
from .log import *
'''
Rule-dict conventions:
1. By default sel.css()[0] is taken; to keep all matches, mark the rule with '__unique': False or '__list': True.
2. By default every dict is interpreted as CSS parsing rules; mark it with '__use': 'dump' to indicate it is used to dump data into an item.
'''
class CommonSpider(CrawlSpider):
    """Generic crawler that extracts items declaratively from nested CSS-rule dicts.

    A rule dict maps CSS selectors to either a leaf selector string (usually
    ending in '::text') or a nested rule dict. Directive keys:
      * '__use': 'dump' — this dict's leaf rules are extracted into an item;
      * '__list': True  — each selector match produces its own item instead of
        merging everything into one.
    """
    # Join all '::text' fragments of one field into a single string.
    auto_join_text = True

    # Directive keys that are not CSS selectors.
    keywords = {'__use', '__list'}

    DEBUG = True

    ''' # css rule example:
    all_css_rules = {
        '.zm-profile-header': {
            '.zm-profile-header-main': {
                '__use':'dump',
                'name':'.title-section .name::text',
                'sign':'.title-section .bio::text',
                'location':'.location.item::text',
                'business':'.business.item::text',
                'employment':'.employment.item::text',
                'position':'.position.item::text',
                'education':'.education.item::text',
                'education_extra':'.education-extra.item::text',
            }, '.zm-profile-header-operation': {
                '__use':'dump',
                'agree':'.zm-profile-header-user-agree strong::text',
                'thanks':'.zm-profile-header-user-thanks strong::text',
            }, '.profile-navbar': {
                '__use':'dump',
                'asks':'a[href*=asks] .num::text',
                'answers':'a[href*=answers] .num::text',
                'posts':'a[href*=posts] .num::text',
                'collections':'a[href*=collections] .num::text',
                'logs':'a[href*=logs] .num::text',
            },
        }, '.zm-profile-side-following': {
            '__use':'dump',
            'followees':'a.item[href*=followees] strong::text',
            'followers':'a.item[href*=followers] strong::text',
        }
    }
    '''

    def debug(self, sth):
        # Fixed: the original defined this without `self` and read the class
        # attribute DEBUG as a bare (global) name, so any call raised.
        if self.DEBUG:
            print(sth)

    def extract_item(self, sels):
        """Extract text from *sels* with runs of whitespace collapsed.

        NOTE: content that consists only of spaces is dropped.
        """
        contents = []
        for sel in sels:
            content = re.sub(r'\s+', ' ', sel.extract())
            if content != ' ':
                contents.append(content)
        return contents

    def extract_items(self, sel, rules, item):
        """Apply every leaf rule of *rules* to *sel*, accumulating into *item*."""
        for nk, nv in rules.items():
            if nk in ('__use', '__list'):
                continue
            if nk not in item:
                item[nk] = []
            if sel.css(nv):
                # Extracted without any extra spaces.
                item[nk] += self.extract_item(sel.css(nv))
            else:
                item[nk] = []

    def traversal(self, sel, rules, item_class, item, items):
        """Recursively walk *rules* over *sel* building item_class instances.

        Two modes: (1) one *item* that all data merges into; (2) with
        '__list', each matching node appends its own item to *items*.
        """
        if item is None:
            item = item_class()
        if '__use' in rules:
            if '__list' in rules:
                unique_item = item_class()
                self.extract_items(sel, rules, unique_item)
                items.append(unique_item)
            else:
                self.extract_items(sel, rules, item)
        else:
            for nk, nv in rules.items():
                for child in sel.css(nk):
                    self.traversal(child, nv, item_class, item, items)

    def deal_text(self, sel, item, force_1_item, k, v):
        """Store the extraction of leaf rule *v* under key *k* of *item*."""
        if v.endswith('::text') and self.auto_join_text:
            item[k] = ' '.join(self.extract_item(sel.css(v)))
        else:
            extracted = self.extract_item(sel.css(v))
            if force_1_item:
                # Collapse to the first match (or '' when nothing matched).
                item[k] = extracted[0] if extracted else ''
            else:
                item[k] = extracted

    def traversal_dict(self, sel, rules, item_class, item, items, force_1_item):
        """Like traversal() but builds plain dicts, recursing on nested rules."""
        item = {}  # the passed-in *item* is intentionally ignored
        for k, v in rules.items():
            if type(v) != dict:
                if k in self.keywords:
                    continue
                if type(v) == list:
                    continue
                self.deal_text(sel, item, force_1_item, k, v)
            else:
                item[k] = []
                for child in sel.css(k):
                    self.traversal_dict(child, v, item_class, item, item[k],
                                        force_1_item)
        items.append(item)

    def dfs(self, sel, rules, item_class, force_1_item):
        """Entry point of the rule walk; returns the list of built items."""
        if sel is None:
            return []
        items = []
        if item_class != dict:
            # Fixed: the original also passed force_1_item here, but
            # traversal() accepts no such parameter (TypeError at runtime).
            self.traversal(sel, rules, item_class, None, items)
        else:
            self.traversal_dict(sel, rules, item_class, None, items,
                                force_1_item)
        return items

    def parse_with_rules(self, response, rules, item_class, force_1_item=False):
        """Parse *response* against *rules*, producing *item_class* objects."""
        return self.dfs(Selector(response), rules, item_class, force_1_item)

    ''' # use parse_with_rules example:
    def parse_people_with_rules(self, response):
        item = self.parse_with_rules(response, self.all_css_rules, ZhihuPeopleItem)
        item['id'] = urlparse(response.url).path.split('/')[-1]
        info('Parsed '+response.url) # +' to '+str(item))
        return item
    '''
| {
"content_hash": "3d7809a5456478baff1a87be9deeb2dd",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 86,
"avg_line_length": 34.41875,
"alnum_prop": 0.5091701470855275,
"repo_name": "geekan/scrapy-css-rule-spider",
"id": "72ef7e3fe53f5fb3f49dd3762341f40a81d86061",
"size": "5635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65452"
},
{
"name": "Shell",
"bytes": "582"
}
],
"symlink_target": ""
} |
import re
from collections import OrderedDict
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from model_utils.models import TimeStampedModel
class Base(TimeStampedModel):
    """Abstract base for all named models: a `name` field plus the
    created/modified timestamps inherited from TimeStampedModel."""
    class Meta:
        abstract = True

    def __str__(self):
        return self.name

    name = models.CharField(max_length=100)
class XWSBase(Base):
    """Abstract base for records keyed by an external integer id plus an XWS slug."""
    class Meta:
        abstract = True

    # Primary key mirrors the upstream data set's id instead of auto-increment.
    id = models.IntegerField(unique=True, primary_key=True)
    xws = models.CharField(max_length=100)
class Faction(Base):
    """An X-Wing faction; `xws` and the icon names are derived from `name`."""

    # name -> xws identifier, applied automatically in save().
    XWS_MAP = {
        "Galactic Empire": "imperial",
        "First Order": "imperial",
        "Rebel Alliance": "rebel",
        "Resistance": "rebel",
        "Scum and Villainy": "scum",
    }
    # name -> faction icon suffix (hoisted out of icon(): the literal dict
    # was rebuilt on every call).
    ICON_MAP = {
        "Galactic Empire": "empire",
        "First Order": "firstorder",
        "Rebel Alliance": "rebel",
        "Resistance": "rebel",
        "Scum and Villainy": "scum",
    }
    # name -> pilot helmet icon suffix (hoisted out of pilot_icon()).
    PILOT_ICON_MAP = {
        "Galactic Empire": "helmet-imperial",
        "First Order": "helmet-imperial",
        "Rebel Alliance": "helmet-rebel",
        "Resistance": "helmet-rebel",
        "Scum and Villainy": "helmet-scum",
    }

    xws = models.CharField(max_length=100)

    def icon(self):
        """Return the icon-font suffix for this faction (KeyError if unknown)."""
        return self.ICON_MAP[self.name]

    def pilot_icon(self):
        """Return the pilot helmet icon suffix for this faction."""
        return self.PILOT_ICON_MAP[self.name]

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """Derive `xws` from `name` before saving."""
        self.xws = self.XWS_MAP[self.name]
        super().save(force_insert, force_update, using, update_fields)
class Action(Base):
    # A ship action (focus, evade, ...); only the inherited `name` is needed.
    pass
class BaseSize(Base):
    # Ship base size (small/large/...); only the inherited `name` is needed.
    pass
class SlotType(Base):
    # Type of an upgrade slot; only the inherited `name` is needed.
    pass
class StatisticSet(models.Model):
    """The skill/attack/agility/hull/shields stat line shared by ships and pilots."""

    def __str__(self):
        stats = (self.skill, self.attack, self.agility, self.hull, self.shields)
        return "{}/{}/{}/{}/{}".format(*stats)

    def skill_dict(self):
        """Return the stat line as an OrderedDict.

        Note the output key is 'shield' (singular) while the field is `shields`.
        """
        return OrderedDict([
            ("skill", self.skill),
            ("attack", self.attack),
            ("agility", self.agility),
            ("hull", self.hull),
            ("shield", self.shields),
        ])

    skill = models.IntegerField(default=0)
    attack = models.IntegerField()
    agility = models.IntegerField()
    hull = models.IntegerField()
    shields = models.IntegerField()
class Ship(XWSBase):
    """A ship chassis: base stat line, actions, base size, and the factions flying it."""
    faction = models.ManyToManyField(Faction)
    stats = models.OneToOneField(StatisticSet)
    actions = models.ManyToManyField(Action)
    size = models.ForeignKey(BaseSize)
class Pilot(XWSBase):
    """A pilot card for a specific ship and faction."""
    class Meta:
        # The same xws slug may repeat across factions/ships, not within one combo.
        unique_together = ('xws', 'faction', 'ship')

    is_unique = models.BooleanField(default=False)
    ship = models.ForeignKey(Ship)
    points = models.IntegerField()
    skill = models.IntegerField()
    ability = models.TextField(blank=True, null=True)
    image = models.CharField(max_length=300, blank=True, null=True)
    faction = models.ForeignKey(Faction)
    # Optional per-pilot override of the ship's base stat line.
    ship_override = models.ForeignKey(StatisticSet, blank=True, null=True)
class Slot(models.Model):
    """An upgrade slot on a pilot card; displays as its slot type's name."""
    def __str__(self):
        return self.slot_type.__str__()

    slot_type = models.ForeignKey(SlotType)
    pilot = models.ForeignKey(Pilot)
class GrantType(Base):
    # Category of a Grant; only the inherited `name` is needed.
    pass
class Grant(Base):
    """Something an upgrade grants — an action, a slot type, or a stat set —
    modeled as a generic foreign key restricted to those three models."""
    # Fixed: the third model was misspelled 'StatisticsSet' (the model is
    # StatisticSet), so that branch of the limit could never match.
    # NOTE(review): ContentType.model stores lowercased names; verify these
    # capitalized values actually match rows in django_content_type.
    limit = models.Q(app_label='xwing_data', model='Action') | \
            models.Q(app_label='xwing_data', model='SlotType') | \
            models.Q(app_label='xwing_data', model='StatisticSet')
    content_type = models.ForeignKey(
        ContentType,
        limit_choices_to=limit,
        on_delete=models.CASCADE
    )
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
class Upgrade(XWSBase):
    """An upgrade card, with the slot it occupies and what it grants."""

    # Matches a trailing "(+N)"/"(-N)" point modifier in the display name.
    # Fixed: the original pattern was a plain (non-raw) string, so "\(" was an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future Pythons).
    _POINT_MOD_RE = re.compile(r"\([+\-]\d\)")

    def __str__(self):
        # Strip the point modifier "just for adaptability at the moment".
        return self._POINT_MOD_RE.sub("", self.name).strip()

    def static_image_url(self):
        # FIXME(review): str.replace() requires two arguments, so this raises
        # TypeError whenever called; `image` is also a CharField, which has no
        # `.url`. Left unchanged because the intended mapping is unknown.
        return self.image.url.replace('')

    is_unique = models.BooleanField(default=False)
    is_limited = models.BooleanField(default=False)
    text = models.TextField(blank=True, null=True)
    slot = models.ForeignKey(SlotType)
    image = models.CharField(max_length=300, blank=True, null=True)
    points = models.IntegerField()
    energy = models.IntegerField(default=0)
    faction = models.ManyToManyField(Faction, blank=True)
    range = models.CharField(max_length=5, blank=True, null=True)
    attack = models.IntegerField(default=0)
    ships = models.ManyToManyField(Ship, blank=True)
    size = models.ManyToManyField(BaseSize, blank=True)
    grants = models.ManyToManyField(Grant, blank=True)
class DamageDeck(Base):
    # A damage deck edition; only the inherited `name` is needed.
    pass
class DamageType(Base):
    # Category of a damage card; only the inherited `name` is needed.
    pass
class DamageCard(Base):
    """A damage-deck card: copy count, type, owning deck, rules text, and image."""
    amount = models.IntegerField(default=0)
    type = models.ForeignKey(DamageType)
    deck = models.ForeignKey(DamageDeck)
    text = models.TextField(blank=True, null=True)
    image = models.CharField(max_length=300, blank=True, null=True)
| {
"content_hash": "684b988f87ef85db93a6e69ad609f936",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 119,
"avg_line_length": 27.048128342245988,
"alnum_prop": 0.6352313167259787,
"repo_name": "sheepeatingtaz/xwingoverlayer",
"id": "dfb3e47bc893f54d8fbed59df40a6fee43493f1c",
"size": "5058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xwing_data/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9434"
},
{
"name": "HTML",
"bytes": "21694"
},
{
"name": "JavaScript",
"bytes": "5176"
},
{
"name": "Python",
"bytes": "74520"
},
{
"name": "Shell",
"bytes": "5558"
}
],
"symlink_target": ""
} |
# Expose the package version computed by the generated _version module.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions  # keep the helper out of the package namespace

# Public submodules of the package.
__all__ = ['catalog', 'testing', 'http_util']
| {
"content_hash": "e19d1fa39b7b2dccc69b99b0a9911d48",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 27.8,
"alnum_prop": 0.6618705035971223,
"repo_name": "MoonRaker/siphon",
"id": "6cabd7ddcd421258105ed9bfecddb459f7296fb1",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "siphon/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "245468"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import pytest
import os
from oar.lib import (db, AdmissionRule)
from oar.lib.submission import (parse_resource_descriptions, add_micheline_jobs, set_not_cli)
# NOTE(review): pytest.yield_fixture is deprecated; @pytest.fixture supports
# yield since pytest 3.0.
@pytest.yield_fixture(scope='function', autouse=True)
def minimal_db_initialization(request):
    """Reset the DB for each test: one default queue, five resources spread
    over three hosts, and a single admission rule, inside an ephemeral session."""
    db.delete_all()
    with db.session(ephemeral=True):
        db['Queue'].create(name='default', priority=3, scheduler_policy='kamelot', state='Active')
        # add some resources
        for i in range(5):
            # Two resources per network address (localhost0/1/2).
            db['Resource'].create(network_address="localhost" + str(int(i / 2)))
        # Replace all admission rules with a single known one.
        db.session.execute(AdmissionRule.__table__.delete())
        db['AdmissionRule'].create(rule="name='yop'")
        yield
@pytest.fixture(scope='function', autouse=True)
def not_cli():
    # Tell the submission layer it is not driven by the command-line client.
    set_not_cli()
def default_job_vars(resource_request):
    """Build the default keyword set expected by add_micheline_jobs for a
    simple passive job owned by the current user."""
    return dict(
        job_type='PASSIVE',
        resource_request=resource_request,
        name='yop',
        project='yop',
        command='sleep',
        info_type='',
        queue_name='default',
        properties='',
        checkpoint=0,
        signal=12,
        notify='',
        types=None,
        launching_directory='/tmp',
        dependencies=None,
        stdout=None,
        stderr=None,
        hold=None,
        initial_request='foo',
        user=os.environ['USER'],
        array_id=0,
        start_time='0',
        reservation_field=None,
    )
def test_add_micheline_jobs_1():
    """A minimal single-resource submission creates exactly one job."""
    default_resources = '/resource_id=1'
    resource_request = parse_resource_descriptions(None, default_resources, 'resource_id')
    job_vars = default_job_vars(resource_request)
    # No reservation, no job key, not an array job.
    reservation_date = ''
    use_job_key = False
    import_job_key_inline = ''
    import_job_key_file = ''
    export_job_key_file = ''
    initial_request = 'yop'
    array_nb = 0
    array_params = []
    # print(job_vars)
    (err, job_id_lst) = add_micheline_jobs(job_vars, reservation_date, use_job_key,
                                           import_job_key_inline, import_job_key_file,
                                           export_job_key_file,
                                           initial_request, array_nb, array_params)
    print("job id:", job_id_lst)
    assert len(job_id_lst) == 1
def test_add_micheline_jobs_2():
    """Same as test 1 but with stdout/stderr redirection and a job type set;
    still creates exactly one job."""
    default_resources = '/resource_id=1'
    resource_request = parse_resource_descriptions(None, default_resources, 'resource_id')
    job_vars = default_job_vars(resource_request)
    job_vars['stdout'] = 'yop'
    job_vars['stderr'] = 'poy'
    job_vars['types'] = 'foo'
    reservation_date = ''
    use_job_key = False
    import_job_key_inline = ''
    import_job_key_file = ''
    export_job_key_file = ''
    initial_request = 'yop'
    array_nb = 0
    array_params = []
    # print(job_vars)
    (err, job_id_lst) = add_micheline_jobs(job_vars, reservation_date, use_job_key,
                                           import_job_key_inline, import_job_key_file,
                                           export_job_key_file,
                                           initial_request, array_nb, array_params)
    print("job id:", job_id_lst)
    assert len(job_id_lst) == 1
def test_add_micheline_simple_array_job():
    """An array submission with array_nb=5 creates five jobs, one per parameter."""
    # Two resource groups: 2 hosts x 1 resource, plus 2 standalone resources.
    default_resources = 'network_address=2/resource_id=1+/resource_id=2'
    resource_request = parse_resource_descriptions(None, default_resources, 'resource_id')
    job_vars = default_job_vars(resource_request)
    job_vars['types'] = 'foo'
    reservation_date = ''
    use_job_key = False
    import_job_key_inline = ''
    import_job_key_file = ''
    export_job_key_file = ''
    initial_request = 'yop'
    array_nb = 5
    array_params = [str(i) for i in range(array_nb)]
    # print(job_vars)
    (err, job_id_lst) = add_micheline_jobs(job_vars, reservation_date, use_job_key,
                                           import_job_key_inline, import_job_key_file,
                                           export_job_key_file,
                                           initial_request, array_nb, array_params)
    # Debug output: inspect the resource rows actually created.
    r = db['JobResourceGroup'].query.all()
    for item in r:
        print(item.to_dict())
    r = db['JobResourceDescription'].query.all()
    for item in r:
        print(item.to_dict())
    print("job id:", job_id_lst)
    assert len(job_id_lst) == 5
| {
"content_hash": "693ac9508d54bdc92e2fdfed21365745",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 98,
"avg_line_length": 30.706293706293707,
"alnum_prop": 0.5650193577772717,
"repo_name": "fr0uty/oartm",
"id": "7c740d0e4e9222bd96ae19daba1fa713f80d5548",
"size": "4407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/test_submission.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2765"
},
{
"name": "Perl",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "601158"
},
{
"name": "Shell",
"bytes": "5491"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from ludwig.constants import *
from ludwig.decoders.sequence_decoders import DECODER_REGISTRY
from ludwig.encoders.sequence_encoders import ENCODER_REGISTRY as SEQUENCE_ENCODER_REGISTRY
from ludwig.encoders.text_encoders import *
from ludwig.features.base_feature import InputFeature
from ludwig.features.base_feature import OutputFeature
from ludwig.modules.loss_modules import SampledSoftmaxCrossEntropyLoss
from ludwig.modules.loss_modules import SequenceLoss
from ludwig.modules.metric_modules import EditDistanceMetric, \
SequenceAccuracyMetric
from ludwig.modules.metric_modules import PerplexityMetric
from ludwig.modules.metric_modules import SequenceLastAccuracyMetric
from ludwig.modules.metric_modules import SequenceLossMetric
from ludwig.modules.metric_modules import TokenAccuracyMetric
from ludwig.utils.math_utils import softmax
from ludwig.utils.metrics_utils import ConfusionMatrix
from ludwig.utils.misc_utils import set_default_value
from ludwig.utils.strings_utils import PADDING_SYMBOL
from ludwig.utils.strings_utils import UNKNOWN_SYMBOL
from ludwig.utils.strings_utils import build_sequence_matrix
from ludwig.utils.strings_utils import create_vocabulary
logger = logging.getLogger(__name__)
class SequenceFeatureMixin(object):
    """Shared preprocessing logic for sequence input and output features:
    vocabulary construction and text-to-id-matrix conversion."""
    type = SEQUENCE

    preprocessing_defaults = {
        'sequence_length_limit': 256,
        'most_common': 20000,
        'padding_symbol': PADDING_SYMBOL,
        'unknown_symbol': UNKNOWN_SYMBOL,
        'padding': 'right',
        'tokenizer': 'space',
        'lowercase': False,
        'vocab_file': None,
        'missing_value_strategy': FILL_WITH_CONST,
        'fill_value': UNKNOWN_SYMBOL
    }

    @staticmethod
    def cast_column(feature, dataset_df, backend):
        # No dtype cast needed; sequences are handled as strings downstream.
        return dataset_df

    @staticmethod
    def get_feature_meta(column, preprocessing_parameters, backend):
        """Build vocabulary metadata (idx2str/str2idx/str2freq, vocab size,
        capped max sequence length) from the raw text column."""
        column = column.astype(str)
        idx2str, str2idx, str2freq, max_length, _, _, _ = create_vocabulary(
            column, preprocessing_parameters['tokenizer'],
            lowercase=preprocessing_parameters['lowercase'],
            num_most_frequent=preprocessing_parameters['most_common'],
            vocab_file=preprocessing_parameters['vocab_file'],
            unknown_symbol=preprocessing_parameters['unknown_symbol'],
            padding_symbol=preprocessing_parameters['padding_symbol'],
            processor=backend.df_engine
        )
        # Cap the stored max length at the configured limit.
        max_length = min(
            preprocessing_parameters['sequence_length_limit'],
            max_length
        )
        return {
            'idx2str': idx2str,
            'str2idx': str2idx,
            'str2freq': str2freq,
            'vocab_size': len(idx2str),
            'max_sequence_length': max_length
        }

    @staticmethod
    def feature_data(column, metadata, preprocessing_parameters, backend):
        """Convert a raw text column into a padded integer id matrix using the
        vocabulary stored in *metadata*."""
        sequence_data = build_sequence_matrix(
            sequences=column,
            inverse_vocabulary=metadata['str2idx'],
            tokenizer_type=preprocessing_parameters['tokenizer'],
            length_limit=metadata['max_sequence_length'],
            padding_symbol=preprocessing_parameters['padding_symbol'],
            padding=preprocessing_parameters['padding'],
            unknown_symbol=preprocessing_parameters['unknown_symbol'],
            lowercase=preprocessing_parameters['lowercase'],
            tokenizer_vocab_file=preprocessing_parameters[
                'vocab_file'
            ],
            processor=backend.df_engine
        )
        return sequence_data

    @staticmethod
    def add_feature_data(
            feature,
            input_df,
            proc_df,
            metadata,
            preprocessing_parameters,
            backend
    ):
        # Write the processed id matrix into the processed dataframe column.
        sequence_data = SequenceInputFeature.feature_data(
            input_df[feature[COLUMN]].astype(str),
            metadata[feature[NAME]], preprocessing_parameters,
            backend
        )
        proc_df[feature[PROC_COLUMN]] = sequence_data
        return proc_df
class SequenceInputFeature(SequenceFeatureMixin, InputFeature):
    """Input feature that encodes integer token sequences with a sequence encoder."""
    encoder = 'embed'
    max_sequence_length = None

    def __init__(self, feature, encoder_obj=None):
        super().__init__(feature)
        self.overwrite_defaults(feature)
        # An already-built encoder may be passed in (e.g. tied weights).
        if encoder_obj:
            self.encoder_obj = encoder_obj
        else:
            self.encoder_obj = self.initialize_encoder(feature)

    def call(self, inputs, training=None, mask=None):
        """Encode a [batch, sequence] integer tensor; id 0 is treated as padding."""
        assert isinstance(inputs, tf.Tensor)
        assert inputs.dtype == tf.int8 or inputs.dtype == tf.int16 or \
               inputs.dtype == tf.int32 or inputs.dtype == tf.int64
        assert len(inputs.shape) == 2

        inputs_exp = tf.cast(inputs, dtype=tf.int32)
        # Mask out padding and derive each row's true length from it.
        inputs_mask = tf.not_equal(inputs, 0)
        lengths = tf.reduce_sum(tf.cast(inputs_mask, dtype=tf.int32), axis=1)
        encoder_output = self.encoder_obj(
            inputs_exp, training=training, mask=inputs_mask
        )
        # Expose the lengths so downstream decoders can use them.
        encoder_output[LENGTHS] = lengths
        return encoder_output

    @classmethod
    def get_input_dtype(cls):
        return tf.int32

    def get_input_shape(self):
        # Variable-length sequence dimension.
        return None,

    @staticmethod
    def update_config_with_metadata(
            input_feature,
            feature_metadata,
            *args,
            **kwargs
    ):
        # Propagate the computed vocabulary and length cap into the config.
        input_feature['vocab'] = feature_metadata['idx2str']
        input_feature['max_sequence_length'] = feature_metadata[
            'max_sequence_length']

    @staticmethod
    def populate_defaults(input_feature):
        set_default_value(input_feature, TIED, None)
        set_default_value(input_feature, 'encoder', 'parallel_cnn')

    encoder_registry = SEQUENCE_ENCODER_REGISTRY
class SequenceOutputFeature(SequenceFeatureMixin, OutputFeature):
    """Output feature that decodes/evaluates token sequences."""
    decoder = 'generator'
    loss = {TYPE: SOFTMAX_CROSS_ENTROPY}
    # Placeholders only; _setup_metrics() replaces this dict per instance.
    metric_functions = {LOSS: None, TOKEN_ACCURACY: None,
                        SEQUENCE_ACCURACY: None, LAST_ACCURACY: None,
                        PERPLEXITY: None, EDIT_DISTANCE: None}
    default_validation_metric = LOSS
    max_sequence_length = 0
    num_classes = 0
    def __init__(self, feature):
        """Build the decoder, loss, and metric objects from the feature config."""
        super().__init__(feature)
        self.overwrite_defaults(feature)
        self.decoder_obj = self.initialize_decoder(feature)
        self._setup_loss()
        self._setup_metrics()
    def _setup_loss(self):
        """Instantiate the training loss from the configured loss type.

        Raises ValueError for an unsupported loss type; the eval loss is
        always the (metric-wrapped) sequence loss.
        """
        if self.loss[TYPE] == 'softmax_cross_entropy':
            self.train_loss_function = SequenceLoss()
        elif self.loss[TYPE] == 'sampled_softmax_cross_entropy':
            # The sampled loss needs the decoder's projection weights.
            self.train_loss_function = SampledSoftmaxCrossEntropyLoss(
                decoder_obj=self.decoder_obj,
                num_classes=self.num_classes,
                feature_loss=self.loss,
                name='train_loss'
            )
        else:
            raise ValueError(
                "Loss type {} is not supported. Valid values are "
                "'softmax_cross_entropy' or "
                "'sampled_softmax_cross_entropy'".format(self.loss[TYPE])
            )

        self.eval_loss_function = SequenceLossMetric()
def _setup_metrics(self):
self.metric_functions = {} # needed to shadow class variable
self.metric_functions[LOSS] = self.eval_loss_function
self.metric_functions[TOKEN_ACCURACY] = TokenAccuracyMetric()
self.metric_functions[SEQUENCE_ACCURACY] = SequenceAccuracyMetric()
self.metric_functions[LAST_ACCURACY] = SequenceLastAccuracyMetric()
self.metric_functions[PERPLEXITY] = PerplexityMetric()
self.metric_functions[EDIT_DISTANCE] = EditDistanceMetric()
    # overrides super class OutputFeature.update_metrics() method
    def update_metrics(self, targets, predictions):
        """Feed one batch into every metric, routing each metric to the
        prediction field it consumes."""
        for metric, metric_fn in self.metric_functions.items():
            if metric == LOSS or metric == PERPLEXITY:
                # These consume the full predictions dict (need logits/probs).
                metric_fn.update_state(targets, predictions)
            elif metric == LAST_ACCURACY:
                metric_fn.update_state(targets, predictions[LAST_PREDICTIONS])
            else:
                metric_fn.update_state(targets, predictions[PREDICTIONS])
    def logits(
            self,
            inputs,
            target=None,
            training=None
    ):
        """Return training-time logits via teacher forcing when a target is
        available; otherwise pass the encoder output through unchanged."""
        if training and target is not None:
            return self.decoder_obj._logits_training(
                inputs,
                target=tf.cast(target, dtype=tf.int32),
                training=training
            )
        else:
            return inputs
    def predictions(self, inputs, training=None):
        """Produce evaluation-time predictions with the generator decoder."""
        # Generator Decoder
        return self.decoder_obj._predictions_eval(inputs, training=training)
    @classmethod
    def get_output_dtype(cls):
        # Token ids.
        return tf.int32
    def get_output_shape(self):
        # One token id per position, up to the configured maximum length.
        return self.max_sequence_length,
    @staticmethod
    def update_config_with_metadata(
            output_feature,
            feature_metadata,
            *args,
            **kwargs
    ):
        """Propagate vocabulary-derived values into the output feature config
        and validate class_weights / class_similarities against the vocabulary.

        Raises ValueError on any size mismatch.
        """
        output_feature['num_classes'] = feature_metadata['vocab_size']
        output_feature['max_sequence_length'] = (
            feature_metadata['max_sequence_length']
        )
        # Per-class loss weights must cover every class, including <UNK>/<PAD>.
        if isinstance(output_feature[LOSS]['class_weights'], (list, tuple)):
            if (len(output_feature[LOSS]['class_weights']) !=
                    output_feature['num_classes']):
                raise ValueError(
                    'The length of class_weights ({}) is not compatible with '
                    'the number of classes ({}) for feature {}. '
                    'Check the metadata JSON file to see the classes '
                    'and their order and consider there needs to be a weight '
                    'for the <UNK> and <PAD> class too.'.format(
                        len(output_feature[LOSS]['class_weights']),
                        output_feature['num_classes'],
                        output_feature[COLUMN]
                    )
                )

        if output_feature[LOSS]['class_similarities_temperature'] > 0:
            if 'class_similarities' in output_feature[LOSS]:
                similarities = output_feature[LOSS]['class_similarities']
                temperature = output_feature[LOSS][
                    'class_similarities_temperature']

                # Validate that the similarity matrix is square and its size
                # matches the number of classes.
                curr_row = 0
                first_row_length = 0
                is_first_row = True
                for row in similarities:
                    if is_first_row:
                        first_row_length = len(row)
                        is_first_row = False
                        curr_row += 1
                    else:
                        curr_row_length = len(row)
                        if curr_row_length != first_row_length:
                            raise ValueError(
                                'The length of row {} of the class_similarities '
                                'of {} is {}, different from the length of '
                                'the first row {}. All rows must have '
                                'the same length.'.format(
                                    curr_row,
                                    output_feature[COLUMN],
                                    curr_row_length,
                                    first_row_length
                                )
                            )
                        else:
                            curr_row += 1
                all_rows_length = first_row_length

                if all_rows_length != len(similarities):
                    raise ValueError(
                        'The class_similarities matrix of {} has '
                        '{} rows and {} columns, '
                        'their number must be identical.'.format(
                            output_feature[COLUMN],
                            len(similarities),
                            all_rows_length
                        )
                    )

                if all_rows_length != output_feature['num_classes']:
                    raise ValueError(
                        'The size of the class_similarities matrix of {} is '
                        '{}, different from the number of classe ({}). '
                        'Check the metadata JSON file to see the classes '
                        'and their order and '
                        'consider <UNK> and <PAD> class too.'.format(
                            output_feature[COLUMN],
                            all_rows_length,
                            output_feature['num_classes']
                        )
                    )

                # Normalize each row into a probability distribution via a
                # temperature-scaled softmax.
                similarities = np.array(similarities, dtype=np.float32)
                for i in range(len(similarities)):
                    similarities[i, :] = softmax(
                        similarities[i, :],
                        temperature=temperature
                    )

                output_feature[LOSS]['class_similarities'] = similarities
            else:
                raise ValueError(
                    'class_similarities_temperature > 0, '
                    'but no class_similarities are provided '
                    'for feature {}'.format(output_feature[COLUMN])
                )

        if output_feature[LOSS][TYPE] == 'sampled_softmax_cross_entropy':
            # The sampled softmax needs per-class frequencies from training data.
            output_feature[LOSS]['class_counts'] = [
                feature_metadata['str2freq'][cls]
                for cls in feature_metadata['idx2str']
            ]
    @staticmethod
    def calculate_overall_stats(
            predictions,
            targets,
            train_set_metadata
    ):
        """Compute confusion-matrix statistics on the last token of each target sequence.

        Args:
            predictions: dict holding LAST_PREDICTIONS, the predicted label of
                the final step of every sequence.
            targets: 2-D array of target token ids; assumes 0 marks padding —
                TODO confirm with the preprocessing code.
            train_set_metadata: feature metadata; 'idx2str' supplies the label
                names passed to the confusion matrix.

        Returns:
            dict with 'confusion_matrix', 'overall_stats' and 'per_class_stats'.
        """
        overall_stats = {}
        sequences = targets
        # Pick each row's last non-zero element: the cumsum of the non-zero
        # mask is maximal from the last non-zero position onward, and argmax
        # returns the first index of that maximum.
        last_elem_sequence = sequences[np.arange(sequences.shape[0]),
                                       (sequences != 0).cumsum(1).argmax(1)]
        confusion_matrix = ConfusionMatrix(
            last_elem_sequence,
            predictions[LAST_PREDICTIONS],
            labels=train_set_metadata['idx2str']
        )
        overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
        overall_stats['overall_stats'] = confusion_matrix.stats()
        overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()
        return overall_stats
    def postprocess_predictions(
            self,
            result,
            metadata,
            output_directory,
            skip_save_unprocessed_output=False,
    ):
        """Convert raw output tensors into readable values, optionally saving arrays.

        Args:
            result: dict of output tensors keyed by PREDICTIONS,
                LAST_PREDICTIONS, PROBABILITIES, LENGTHS. Mutated in place:
                consumed entries are deleted.
            metadata: feature metadata; when 'idx2str' is present, integer
                tokens are mapped back to strings.
            output_directory: directory for the .npy dumps.
            skip_save_unprocessed_output: when False, the raw numpy arrays are
                also written to disk as <feature>_<key>.npy.

        Returns:
            dict of postprocessed values.
        """
        postprocessed = {}
        name = self.feature_name
        npy_filename = os.path.join(output_directory, '{}_{}.npy')
        if PREDICTIONS in result and len(result[PREDICTIONS]) > 0:
            preds = result[PREDICTIONS].numpy()
            lengths = result[LENGTHS].numpy()
            if 'idx2str' in metadata:
                # Map every token id of every sequence to its string,
                # truncating each sequence to its true length; ids outside the
                # vocabulary become UNKNOWN_SYMBOL.
                postprocessed[PREDICTIONS] = [
                    [metadata['idx2str'][token]
                     if token < len(metadata['idx2str']) else UNKNOWN_SYMBOL
                     for token in [pred[i] for i in range(length)]]
                    for pred, length in
                    [(preds[j], lengths[j]) for j in range(len(preds))]
                ]
            else:
                postprocessed[PREDICTIONS] = preds
            if not skip_save_unprocessed_output:
                np.save(npy_filename.format(name, PREDICTIONS), preds)
            del result[PREDICTIONS]
        if LAST_PREDICTIONS in result and len(result[LAST_PREDICTIONS]) > 0:
            last_preds = result[LAST_PREDICTIONS].numpy()
            if 'idx2str' in metadata:
                postprocessed[LAST_PREDICTIONS] = [
                    metadata['idx2str'][last_pred]
                    if last_pred < len(metadata['idx2str']) else UNKNOWN_SYMBOL
                    for last_pred in last_preds
                ]
            else:
                postprocessed[LAST_PREDICTIONS] = last_preds
            if not skip_save_unprocessed_output:
                np.save(npy_filename.format(name, LAST_PREDICTIONS),
                        last_preds)
            del result[LAST_PREDICTIONS]
        if PROBABILITIES in result and len(result[PROBABILITIES]) > 0:
            probs = result[PROBABILITIES].numpy()
            if probs is not None:
                # probs should be shape [b, s, nc]
                if len(probs.shape) == 3:
                    # get probability of token in that sequence position
                    seq_probs = np.amax(probs, axis=-1)
                    # sum log probability for tokens up to sequence length
                    # create mask only tokens for sequence length
                    mask = np.arange(seq_probs.shape[-1]) \
                        < np.array(result[LENGTHS]).reshape(-1, 1)
                    log_prob = np.sum(np.log(seq_probs) * mask, axis=-1)
                    # commenting probabilities out because usually it is huge:
                    # dataset x length x classes
                    # todo: add a mechanism for letting the user decide to save it
                    postprocessed[PROBABILITIES] = seq_probs
                    postprocessed[PROBABILITY] = log_prob
                else:
                    raise ValueError(
                        'Sequence probability array should be 3-dimensional '
                        'shape, instead shape is {:d}-dimensional'
                        .format(len(probs.shape))
                    )
                if not skip_save_unprocessed_output:
                    np.save(npy_filename.format(name, PROBABILITIES), seq_probs)
                    np.save(npy_filename.format(name, PROBABILITY), log_prob)
            del result[PROBABILITIES]
        if LENGTHS in result:
            del result[LENGTHS]
        return postprocessed
@staticmethod
def populate_defaults(output_feature):
set_default_value(
output_feature,
LOSS,
{
TYPE: 'softmax_cross_entropy',
'sampler': None,
'negative_samples': 0,
'distortion': 1,
'labels_smoothing': 0,
'class_weights': 1,
'robust_lambda': 0,
'confidence_penalty': 0,
'class_similarities_temperature': 0,
'weight': 1
}
)
set_default_value(output_feature[LOSS], TYPE,
'softmax_cross_entropy')
set_default_value(output_feature[LOSS], 'labels_smoothing', 0)
set_default_value(output_feature[LOSS], 'class_weights', 1)
set_default_value(output_feature[LOSS], 'robust_lambda', 0)
set_default_value(output_feature[LOSS], 'confidence_penalty', 0)
set_default_value(output_feature[LOSS],
'class_similarities_temperature', 0)
set_default_value(output_feature[LOSS], 'weight', 1)
if output_feature[LOSS][TYPE] == 'sampled_softmax_cross_entropy':
set_default_value(output_feature[LOSS], 'sampler', 'log_uniform')
set_default_value(output_feature[LOSS], 'negative_samples', 25)
set_default_value(output_feature[LOSS], 'distortion', 0.75)
else:
set_default_value(output_feature[LOSS], 'sampler', None)
set_default_value(output_feature[LOSS], 'negative_samples', 0)
set_default_value(output_feature[LOSS], 'distortion', 1)
set_default_value(output_feature[LOSS], 'unique', False)
set_default_value(output_feature, 'decoder', 'generator')
if output_feature['decoder'] == 'tagger':
set_default_value(output_feature, 'reduce_input', None)
set_default_value(output_feature, 'dependencies', [])
set_default_value(output_feature, 'reduce_input', SUM)
set_default_value(output_feature, 'reduce_dependencies', SUM)
decoder_registry = DECODER_REGISTRY
| {
"content_hash": "370605355e891b9e1dccefd87f2427c6",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 91,
"avg_line_length": 39.74,
"alnum_prop": 0.5587317564167086,
"repo_name": "uber/ludwig",
"id": "513b7fc929cc69b4f3e0590c9f90bd4ea5b743c5",
"size": "20580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ludwig/features/sequence_feature.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "466847"
},
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "292184"
},
{
"name": "JavaScript",
"bytes": "85725"
},
{
"name": "Python",
"bytes": "1241008"
}
],
"symlink_target": ""
} |
from bawt.subsystems.irrigation import Irrigation
import sys
if __name__ == "__main__":
zone = int(sys.argv[1])
state = sys.argv[2]
run_time = 0
if len(sys.argv) >= 4:
run_time = int(sys.argv[3])
irrigation = Irrigation()
if state == 'on':
irrigation.start(zone)
elif state == 'off':
irrigation.stop(zone)
elif state == 'timed':
irrigation.timed_run(zone, run_time)
| {
"content_hash": "3cb4493982fb3cd73f516cf3b4cb2635",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 22.842105263157894,
"alnum_prop": 0.5783410138248848,
"repo_name": "DoriftoShoes/bawt",
"id": "34b638c303473b2977a6433d0159692fe4110a50",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/testirrigation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37846"
}
],
"symlink_target": ""
} |
from subprocess import call
import os

# Site root: the parent of the directory containing this script.
sitePath = os.path.normpath(os.path.dirname((os.path.join(os.getcwd(),os.path.dirname(__file__) ) ) ) )
siteDir = os.path.join(sitePath,"www")

# Copy Notebooks to the user space (leaving originals unchanged)
# Make symlinks for Data and Documentation so that these are visible
# to the web server started in the www build directory
# NOTE: Python 2 print statement — this script does not run under Python 3.
print "Building {:s}".format(siteDir)
call("cd {:s} && mkdocs build --theme united --clean".format(sitePath), shell=True)
call("ln -s {:s}/Data/ {:s}".format(sitePath, siteDir), shell=True)
call("ln -s {:s}/Documentation/ {:s}".format(sitePath, siteDir), shell=True)
call("cp -r {:s}/Notebooks/ {:s}/Notebooks".format(sitePath,siteDir), shell=True)
# Mark all notebooks as trusted so their outputs render without prompting.
call("find . -name \*.ipynb -print0 | xargs -0 jupyter trust", shell=True)
| {
"content_hash": "12fc8d991046b3b7e585de1eccf64cb3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 103,
"avg_line_length": 44.44444444444444,
"alnum_prop": 0.70875,
"repo_name": "lmoresi/UoM-VIEPS-Intro-to-Python",
"id": "800388858358d8a50c9d74fa3ac4c3607e979bd2",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/run-sitebuilder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4439"
},
{
"name": "Jupyter Notebook",
"bytes": "728450"
},
{
"name": "Python",
"bytes": "2183"
},
{
"name": "Shell",
"bytes": "885"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2

# Find the largest highly-saturated blob in the FRC logo image, then draw its
# contour, bounding box, centroid and box center on a blank canvas.
img = cv2.imread("frclogo.jpg")
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# HSV threshold range: any hue (0-179), but high saturation and value.
lower_lim = np.array([0,155,155])
upper_lim = np.array([179,255,255])

# Threshold the HSV image to get highly saturated image
mask = cv2.inRange(hsv, lower_lim, upper_lim)

# find contours
image, contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

# find biggist contour
biggestContourIndex = 0
for i in range(len(contours)):
    if(cv2.contourArea(contours[i]) > cv2.contourArea(contours[biggestContourIndex])):
        biggestContourIndex = i

# find contours above 1000 area
#bigContours = filter(lambda a: cv2.contourArea(a) > 1000, contours)

# find centroid from moments
# NOTE(review): divides by m00 — raises ZeroDivisionError if the largest
# contour has zero area (e.g. when nothing passes the threshold).
M = cv2.moments(contours[biggestContourIndex])
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print("centroid: " + str(cx) + ', ' + str(cy))

# find box around contour and it's center
rect = cv2.minAreaRect(contours[biggestContourIndex])
box = cv2.boxPoints(rect)
# Box center: midpoint of two opposite corners.
bx = int((box[0][0] + box[2][0])/2)
by = int((box[0][1] + box[2][1])/2)
print("center: " + str(bx) + ', ' + str(by))
box = np.int0(box)

# clear image
# Re-threshold with a nearly-empty range to get a black canvas to draw on.
lower_lim = np.array([178,254,254])
mask = cv2.inRange(hsv, lower_lim, upper_lim)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

# draw everything
mask = cv2.drawContours(mask,[box],0,(0,0,255),1)
mask = cv2.drawContours(mask, contours, biggestContourIndex, (0,255,0), 1)
mask = cv2.circle(mask,(cx,cy),4,(255,255,0),-1)
mask = cv2.circle(mask,(bx,by),4,(0,255,255),-1)

cv2.imshow("a",img)
cv2.imshow("b",mask)
cv2.moveWindow("b",600,50)
cv2.waitKey(0)
cv2.destroyAllWindows()
quit()
| {
"content_hash": "2bcca09ab9118d34277900234eeb8dc7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 89,
"avg_line_length": 28.689655172413794,
"alnum_prop": 0.6923076923076923,
"repo_name": "Burtt/IllumicatsVision",
"id": "ef41f94b0fd7f6b64d65f87b3a16978a1a21c334",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LaptopTests/cvtest1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12395"
}
],
"symlink_target": ""
} |
__all__ = ['soundcloud_download', 'soundcloud_download_by_id']

from ..common import *

import json
import urllib.error

# Public client id sent with every SoundCloud API request.
client_id = 'WKcQQdEZw7Oi01KqtHWxeVSxNyRzgT8M'
def soundcloud_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):
    """Download a SoundCloud track given its numeric track id.

    The caller must supply a non-empty title; it is used as the file name.
    """
    assert title
    stream_url = 'https://api.soundcloud.com/tracks/{}/{}?client_id={}'.format(
        id, 'stream', client_id)
    media_type, ext, size = url_info(stream_url)
    print_info(site_info, title, media_type, size)
    if info_only:
        return
    download_urls([stream_url], title, ext, size, output_dir, merge=merge)
def soundcloud_i1_api(track_id):
    """Resolve the 128 kbps MP3 stream URL through SoundCloud's i1 API."""
    api_url = ('https://api.soundcloud.com/i1/tracks/{}/streams'
               '?client_id={}').format(track_id, client_id)
    response = json.loads(get_content(api_url))
    return response['http_mp3_128_url']
def soundcloud_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Resolve a SoundCloud page URL via the API and download the track.

    Args:
        url: the public track page URL.
        output_dir: directory where the file is written.
        merge: passed through to download_urls.
        info_only: when True, only print the media info without downloading.
    """
    url = 'https://api.soundcloud.com/resolve.json?url={}&client_id={}'.format(url, client_id)
    metadata = get_content(url)
    info = json.loads(metadata)
    title = info["title"]
    real_url = info.get('download_url')
    if real_url is None:
        # BUG fix: the key was misspelled 'steram_url', so the stream URL
        # fallback could never be found.
        real_url = info.get('stream_url')
    if real_url is None:
        raise Exception('Cannot get media URI for {}'.format(url))
    # NOTE(review): this unconditionally overrides the URL resolved above with
    # the i1 API result, keeping the earlier lookups only as an availability
    # check — confirm that is intended.
    real_url = soundcloud_i1_api(info['id'])
    mime, ext, size = url_info(real_url)
    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge=merge)
site_info = "SoundCloud.com"
# Entry points used by the extractor dispatcher.
download = soundcloud_download
download_playlist = playlist_not_supported('soundcloud')
| {
"content_hash": "60a0602d4833988be35d742f6ff716fa",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 100,
"avg_line_length": 37.5,
"alnum_prop": 0.6653968253968254,
"repo_name": "zmwangx/you-get",
"id": "1a4061ffd0d7072d0f501586ef2dc702b5b5d37e",
"size": "1598",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/you_get/extractors/soundcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "805"
},
{
"name": "Python",
"bytes": "464961"
},
{
"name": "Shell",
"bytes": "2649"
}
],
"symlink_target": ""
} |
import setuptools
from setuptools.command.install import install
# Install Necessary R packages
class CustomInstallPackages(install):
"""Customized setuptools install command - runs R package install one-liner."""
def run(self):
import subprocess
import shlex
print "Attempting to install R packages...Please wait."
cmd =''' R -e "install.packages(c('optparse', 'gtools', 'klaR','survival', 'mvtnorm', 'modeltools', 'coin', 'MASS'), repos = 'http://cran.stat.ucla.edu')" '''
try:
subprocess.call(shlex.split(cmd))
print "Necessary R packages were sucessfully installed"
except:
print "Error installing R dependecies! Check to see if R is properly installed or see online documentation for more answers."
install.run(self)
# Pkg info
setuptools.setup(
    name="koeken",
    version="0.2.6",
    url="https://github.com/twbattaglia/koeken",
    author="Thomas W. Battaglia",
    author_email="tb1280@nyu.edu",
    description="A Linear Discriminant Analysis (LEfSe) wrapper.",
    long_description=open('README.rst').read(),
    keywords="Biology Microbiome LEfSe QIIME Formatting Diversity Python Bioinformatics",
    # Installed scripts include the bundled LEfSe sources.
    scripts=['koeken/koeken.py', 'koeken/lefse_src/format_input.py', 'koeken/lefse_src/run_lefse.py', 'koeken/lefse_src/lefse.py', 'koeken/lefse_src/plot_cladogram.py', 'koeken/lefse_src/hclust2/hclust2.py', 'koeken/pretty_lefse.py'],
    # Hook the custom R-package installation into the install command.
    cmdclass={'install': CustomInstallPackages},
    packages=setuptools.find_packages(),
    install_requires=['rpy2', 'argparse', 'pandas', 'biopython'],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4'
    ]
)
| {
"content_hash": "5daa6c71e0fad8c5cffb8fa1f27a29d9",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 234,
"avg_line_length": 37.72549019607843,
"alnum_prop": 0.6626819126819127,
"repo_name": "twbattaglia/koeken",
"id": "37c17e101a38eff26bced363e8d7e0fe93fe0876",
"size": "1924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103636"
}
],
"symlink_target": ""
} |
import pytest
# Placeholder test: marked xfail until the bookmark-fetch test is implemented.
@pytest.mark.xfail
def test_get_bookmark():
    raise NotImplementedError
| {
"content_hash": "2d428d5ed8bde34b0166af8f1b3d9430",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 29,
"avg_line_length": 15,
"alnum_prop": 0.7666666666666667,
"repo_name": "globus/globus-sdk-python",
"id": "ed3c08e4424510aa90d722edb14de5788c45392d",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/functional/services/transfer/test_get_bookmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "303"
},
{
"name": "Makefile",
"bytes": "810"
},
{
"name": "Python",
"bytes": "896256"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/most-common-word/
https://leetcode.com/submissions/detail/150204402/
"""
class Solution:
    def mostCommonWord(self, paragraph, banned):
        """Return the most frequent word in paragraph that is not banned.

        Comparison is case-insensitive and punctuation (, . ! ? ' ;) is
        stripped, matching the original behaviour. Returns '' if no word
        qualifies.

        Improvements over the original: banned membership is O(1) via a set
        (was O(len(banned)) per word), the six chained .replace() calls are a
        single regex pass, and the manual max-scan is Counter.most_common.

        :type paragraph: str
        :type banned: List[str]
        :rtype: str
        """
        import re
        from collections import Counter

        banned_set = set(banned)
        # Same punctuation set the original removed with chained replaces.
        words = re.sub(r"[,.!?';]", '', paragraph.lower()).split()
        counts = Counter(w for w in words if w not in banned_set)
        if not counts:
            return ''
        return counts.most_common(1)[0][0]
import unittest
class Test(unittest.TestCase):
    """Checks the LeetCode example from the problem statement."""

    def test(self):
        solution = Solution()
        self.assertEqual(solution.mostCommonWord(
            'Bob hit a ball, the hit BALL flew far after it was hit.', ['hit']), 'ball')
# Run the unit tests when the module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "76c85c5af93a2e6ba3127a1a09fba9fe",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 152,
"avg_line_length": 25.704545454545453,
"alnum_prop": 0.5057471264367817,
"repo_name": "vivaxy/algorithms",
"id": "cbdeba17a3cde7ee95f6b5010683d5e9c96cb7b7",
"size": "1131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/most_common_word.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130225"
},
{
"name": "Python",
"bytes": "272982"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
from watson.auth.providers import abc
class Provider(abc.Base):
    """Authentication provider that stores the user identifier in the session."""

    defaults = {}

    def _validate_configuration(self, config):
        # No checks beyond what the base provider already validates.
        super(Provider, self)._validate_configuration(config)

    def handle_request(self, request):
        """Attach the session's user (or None) to the request."""
        current_user = getattr(request, 'user', None)
        if current_user:
            # Someone already authenticated this request; leave it alone.
            return
        request.user = None
        identifier = request.session[self.config['key']]
        if identifier:
            request.user = self.get_user(identifier)

    def login(self, user, request):
        """Record the user's identifier in the session and on the request."""
        request.user = user
        identifier = getattr(user, self.user_model_identifier)
        request.session[self.config['key']] = identifier

    def logout(self, request):
        """Drop the session entry and clear the request's user."""
        del request.session[self.config['key']]
        request.user = None
| {
"content_hash": "fa364b67f425da5b93f9661a59dc0a62",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 28.962962962962962,
"alnum_prop": 0.6048593350383632,
"repo_name": "watsonpy/watson-auth",
"id": "6110c73cc373c5f2b550eba2d90db01b775fad45",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watson/auth/providers/session/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4119"
},
{
"name": "Python",
"bytes": "79050"
}
],
"symlink_target": ""
} |
from unittest import TestCase
def calcular_frequencias(s):
    """Return a {char: count} frequency dict for the characters of s."""
    frequencias = {}
    for caractere in s:
        if caractere in frequencias:
            frequencias[caractere] += 1
        else:
            frequencias[caractere] = 1
    return frequencias
def gerar_arvore_de_huffman(s):
    """Build a prefix-code tree for the characters of s.

    NOTE(review): this is a simplified greedy chain, not textbook Huffman —
    merged subtrees are never re-inserted into the sorted worklist, so the
    resulting code is only weight-optimal for some inputs. Confirm before
    relying on optimality; the project's own tests depend on this exact shape.
    """
    dct = calcular_frequencias(s)
    folhas = []
    for folha in dct:
        folhas.append(Folha(folha, dct[folha]))
    # Process leaves from lightest to heaviest.
    folhas.sort(key=lambda folha: folha.peso)
    folha = folhas.pop(0)
    arvore = Arvore(folha.char, folha.peso)
    while folhas:
        folha = folhas.pop(0)
        arvore2 = Arvore(folha.char, folha.peso)
        # Each new leaf becomes the left child, the accumulated tree the right.
        arvore = arvore2.fundir(arvore)
    return arvore
def codificar(cod_dict, s):
    """Encode s using the {char: bitstring} mapping cod_dict.

    Characters absent from the mapping are skipped, matching the original
    behaviour. Improvements: membership via `in cod_dict` instead of
    `.keys()`, and str.join instead of quadratic `+=` concatenation.
    """
    return "".join(cod_dict[char] for char in s if char in cod_dict)
class Noh:
    """Internal (non-leaf) node of the prefix-code tree."""

    def __init__(self, peso, esquerdo=None, direito=None):
        self.peso = peso
        # BUG fix: the constructor accepted esquerdo/direito but discarded
        # them, always setting None. Existing call sites pass only peso and
        # assign the children afterwards, so this is backward-compatible.
        self.esquerdo = esquerdo
        self.direito = direito

    def __hash__(self):
        # Hash only on the weight; consistent with __eq__ (equal nodes
        # necessarily have equal peso).
        return hash(self.peso)

    def __eq__(self, other):
        if other is None or not isinstance(other, Noh):
            return False
        return (self.peso == other.peso
                and self.esquerdo == other.esquerdo
                and self.direito == other.direito)
class Folha():
    """Leaf node: a character together with its weight (frequency)."""

    def __init__(self, char=None, peso=None):
        self.char = char
        self.peso = peso

    def __hash__(self):
        # BUG fix: hash(self.__dict__) raised TypeError because dicts are
        # unhashable; hash the (char, peso) tuple instead, which is
        # consistent with __eq__ (it compares the same two attributes).
        return hash((self.char, self.peso))

    def __eq__(self, other):
        if other is None or not isinstance(other, Folha):
            return False
        return self.__dict__ == other.__dict__
class Arvore(object):
    """Binary prefix-code tree; left edges encode '0' and right edges '1'."""

    def __init__(self, char=None, peso=None):
        # With a char the tree starts as a single leaf, otherwise it is empty.
        # NOTE(review): truthiness test, so char='' also yields an empty tree.
        if char:
            self.raiz = Folha(char, peso)
        else:
            self.raiz = None
        self.char = char
        self.peso = peso

    def __hash__(self):
        return hash(self.raiz)

    def __eq__(self, other):
        if other is None:
            return False
        return self.raiz == other.raiz

    def fundir(self, arvore):
        """Merge with arvore: new root weighs the sum; self goes left, arvore right."""
        raiz = Noh(self.raiz.peso + arvore.raiz.peso)
        raiz.esquerdo = self.raiz
        raiz.direito = arvore.raiz
        newArvore = Arvore()
        newArvore.raiz = raiz
        return newArvore

    def cod_dict(self):
        """Return {char: bitstring} via an explicit-stack pre-order walk.

        NOTE(review): if the root itself is a single leaf, code.pop() is
        called on an empty list and raises IndexError — confirm that
        single-symbol inputs are out of scope.
        """
        dct = {}
        code = []    # bits accumulated along the current path
        folhas = []  # stack of nodes still to visit
        folhas.append(self.raiz)
        while folhas:
            atual = folhas.pop()
            if isinstance(atual, Folha):
                letra = atual.char
                dct[letra] = ''.join(code)
                # Backtrack: drop the last bit and take the sibling branch
                # ('1') for the next node popped from the stack.
                code.pop()
                code.append('1')
            else:
                folhas.append(atual.direito)
                folhas.append(atual.esquerdo)
                code.append('0')
        return dct

    def decodificar(self, codigo):
        """Decode the bitstring codigo by walking the tree; returns the text."""
        dct = []
        pos = self.raiz
        if isinstance(pos, Folha):
            # Degenerate single-leaf tree: the code is ignored entirely.
            return pos.char
        else:
            for i in codigo:
                if i == '0':
                    pos = pos.esquerdo
                else:
                    pos = pos.direito
                if isinstance(pos, Folha):
                    # Reached a leaf: emit its char and restart from the root.
                    dct.append(pos.char)
                    pos = self.raiz
            return "".join(dct)
class CalcularFrequenciaCarecteresTestes(TestCase):
    """Unit tests for calcular_frequencias."""

    def teste_string_vazia(self):
        self.assertDictEqual({}, calcular_frequencias(''))

    def teste_string_nao_vazia(self):
        self.assertDictEqual({'a': 3, 'b': 2, 'c': 1}, calcular_frequencias('aaabbc'))
class NohTestes(TestCase):
    """Unit tests for the Folha and Noh node classes."""

    def teste_folha_init(self):
        folha = Folha('a', 3)
        self.assertEqual('a', folha.char)
        self.assertEqual(3, folha.peso)

    def teste_folha_eq(self):
        # Equality requires both char and peso to match.
        self.assertEqual(Folha('a', 3), Folha('a', 3))
        self.assertNotEqual(Folha('a', 3), Folha('b', 3))
        self.assertNotEqual(Folha('a', 3), Folha('a', 2))
        self.assertNotEqual(Folha('a', 3), Folha('b', 2))

    def testes_eq_sem_filhos(self):
        self.assertEqual(Noh(2), Noh(2))
        self.assertNotEqual(Noh(2), Noh(3))

    def testes_eq_com_filhos(self):
        # Same weight but different children must not compare equal.
        noh_com_filho = Noh(2)
        noh_com_filho.esquerdo = Noh(3)
        self.assertNotEqual(Noh(2), noh_com_filho)

    def teste_noh_init(self):
        noh = Noh(3)
        self.assertEqual(3, noh.peso)
        self.assertIsNone(noh.esquerdo)
        self.assertIsNone(noh.direito)
def _gerar_arvore_aaaa_bb_c():
    # Hand-built expected tree for the input 'aaaabbc':
    # root(7) -> left: leaf('a', 4); right: node(3) -> leaf('b', 2), leaf('c', 1)
    raiz = Noh(7)
    raiz.esquerdo = Folha('a', 4)
    noh = Noh(3)
    raiz.direito = noh
    noh.esquerdo = Folha('b', 2)
    noh.direito = Folha('c', 1)
    arvore_esperada = Arvore()
    arvore_esperada.raiz = raiz
    return arvore_esperada
class ArvoreTestes(TestCase):
    """Unit tests for Arvore construction, merging, encoding and decoding."""

    def teste_init_com_defaults(self):
        arvore = Arvore()
        self.assertIsNone(arvore.raiz)

    def teste_init_sem_defaults(self):
        arvore = Arvore('a', 3)
        self.assertEqual(Folha('a', 3), arvore.raiz)

    def teste_fundir_arvores_iniciais(self):
        # Merging two single-leaf trees: left child from self, right from arg.
        raiz = Noh(3)
        raiz.esquerdo = Folha('b', 2)
        raiz.direito = Folha('c', 1)
        arvore_esperada = Arvore()
        arvore_esperada.raiz = raiz
        arvore = Arvore('b', 2)
        arvore2 = Arvore('c', 1)
        arvore_fundida = arvore.fundir(arvore2)
        self.assertEqual(arvore_esperada, arvore_fundida)

    def teste_fundir_arvores_nao_iniciais(self):
        # Chained merges must reproduce the hand-built 'aaaabbc' tree.
        arvore_esperada = _gerar_arvore_aaaa_bb_c()
        arvore = Arvore('b', 2)
        arvore2 = Arvore('c', 1)
        arvore3 = Arvore('a', 4)
        arvore_fundida = arvore.fundir(arvore2)
        arvore_fundida = arvore3.fundir(arvore_fundida)
        self.assertEqual(arvore_esperada, arvore_fundida)

    def teste_gerar_dicionario_de_codificacao(self):
        arvore = _gerar_arvore_aaaa_bb_c()
        self.assertDictEqual({'a': '0', 'b': '10', 'c': '11'}, arvore.cod_dict())

    def teste_decodificar(self):
        arvore = _gerar_arvore_aaaa_bb_c()
        self.assertEqual('aaaabbc', arvore.decodificar('0000101011'))
class TestesDeIntegracao(TestCase):
    """End-to-end tests: frequency count -> tree -> encode -> decode."""

    def teste_gerar_arvore_de_huffman(self):
        arvore = _gerar_arvore_aaaa_bb_c()
        self.assertEqual(arvore, gerar_arvore_de_huffman('aaaabbc'))

    def teste_codificar(self):
        arvore = gerar_arvore_de_huffman('aaaabbc')
        self.assertEqual('0000101011', codificar(arvore.cod_dict(), 'aaaabbc'))
        self.assertEqual('aaaabbc', arvore.decodificar('0000101011'))
| {
"content_hash": "be3f486ce02ee26d2aa011258d32bec8",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 108,
"avg_line_length": 27.748898678414097,
"alnum_prop": 0.5680266709001429,
"repo_name": "juancanuto/estrutura-de-dados",
"id": "3e70e8447a87a9ece4d36b1e25736acd942e7d73",
"size": "6299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huffman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16194"
}
],
"symlink_target": ""
} |
import re

# Pull the docstring and __version__ out of the package source with regexes so
# the setup metadata can never drift from the code.
# NOTE(review): `file` is the Python 2 builtin — under Python 3 this would
# need to be open(); confirm the supported interpreter range.
__src__ = file('lib/__init__.py').read()
__doc__ = re.search('^(["\']{3})(.*?)\\1', __src__, re.M|re.S).group(2).strip()
__version__ = re.search('^__version__\s*=\s*(["\'])(.*?)\\1\s*$', __src__, re.M).group(2).strip()

# Options forwarded to init() below.
options = dict(
    minver = '2.6', # Min Python version required.
    maxver = None, # Max Python version required.
    use_stdeb = False, # Use stdeb for building deb packages.
    use_markdown_readme = True, # Use markdown README.md.
)

# Keyword arguments ultimately passed to the setuptools/distutils setup().
properties = dict(
    name = 'Flask-MIME-Encoders',
    version = __version__,
    url = 'https://github.com/salsita/flask-mime-encoders',
    download_url = 'https://github.com/salsita/flask-mime-encoders/tarball/v{}'.format( __version__),
    description = __doc__.strip().split('\n', 1)[0].strip('.'),
    # First non-empty line of module doc
    long_description = (__doc__.strip() + '\n').split('\n', 1)[1].strip(),
    # Module doc except first non-empty line
    author = 'Salsita Software',
    author_email = 'python@salsitasoft.com',
    license = 'MIT',
    zip_safe = True,
    platforms = 'any',
    keywords = [
        'Flask',
        'MIME',
        'JSON',
        'REST',
        'API',
    ],
    py_modules = [
    ],
    packages = [
        'flask_mime_encoders',
    ],
    package_dir = {
        'flask_mime_encoders': 'lib',
    },
    namespace_packages = [
    ],
    include_package_data = False,
    install_requires = [
        'Flask>=0.5',
    ],
    extras_require = {
    },
    dependency_links=[
    ],
    entry_points = {
    },
    scripts=[
    ],
    classifiers = [
        # See http://pypi.python.org/pypi?:action=list_classifiers
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: MIT License',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Text Processing :: Markup',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
def main(properties=properties, options=options, **custom_options):
    """Imports and runs setup function with given properties."""
    merged_options = dict(options, **custom_options)
    setup = init(**merged_options)
    return setup(**properties)
def init(
    dist='dist',
    minver=None,
    maxver=None,
    use_markdown_readme=True,
    use_stdeb=False,
    use_distribute=False,
):
    """Imports and returns a setup function.

    Args:
        dist: directory handed to distribute's use_setuptools().
        minver: inclusive lower bound on sys.version; exits with status 1
            when the running interpreter is below it.
        maxver: exclusive upper bound on sys.version; same exit behaviour.
        use_markdown_readme: add README.md to setuptools' sdist READMES list.
        use_stdeb: on a Debian based system, import module stdeb to support
            building deb packages (see http://github.com/astraw/stdeb).
        use_distribute: bootstrap setuptools via distribute_setup.py.

    Returns:
        The setup() callable from setuptools (distutils as fallback).
    """
    # BUG fix: was `if not minver == maxver == None:` — besides the `== None`
    # anti-idiom, `None <= sys.version` raises TypeError on Python 3 when only
    # maxver is given; '' compares below every version string, mirroring the
    # old Python 2 None-comparison behaviour.
    if minver is not None or maxver is not None:
        import sys
        if not (minver or '') <= sys.version < (maxver or 'Any'):
            sys.stderr.write(
                '%s: requires python version in <%s, %s), not %s\n' % (
                    sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0]))
            sys.exit(1)

    if use_distribute:
        from distribute_setup import use_setuptools
        use_setuptools(to_dir=dist)
        from setuptools import setup
    else:
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

    if use_markdown_readme:
        try:
            import setuptools.command.sdist
            # Extend (not replace) the recognized README names.
            setuptools.command.sdist.READMES = tuple(
                list(getattr(setuptools.command.sdist, 'READMES', ()))
                + ['README.md'])
        except ImportError:
            pass

    if use_stdeb:
        import platform
        if 'debian' in platform.dist():
            try:
                import stdeb
            except ImportError:
                pass

    return setup
# Delegate to main() when run as `python setup.py ...`.
if __name__ == '__main__':
    main()
| {
"content_hash": "91d1f2d3d6afd37607d8454f7969eca0",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 107,
"avg_line_length": 31.942028985507246,
"alnum_prop": 0.5628402903811253,
"repo_name": "salsita/flask-mime-encoders",
"id": "ab6d77557ef718e099826c752200800439f4ddc2",
"size": "4431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9008"
}
],
"symlink_target": ""
} |
"""
Slacker's adisp library wrapper.
Removes the need to call 'adisp.async(proceed)' manually on yield for Slacker
and Postponed objects: they can now be yielded directly.
"""
from slacker.adisp._adisp import CallbackDispatcher, async
from slacker.postpone import Postponed, Slacker
class _SlackerCallbackDispatcher(CallbackDispatcher):
    """CallbackDispatcher that also accepts yielded Postponed/Slacker objects."""

    def call(self, callers):
        # A yielded iterable is treated as parallel calls; wrap each item.
        # NOTE(review): on Python 3, map() returns a lazy iterator — confirm
        # the base class consumes it correctly, or wrap it in list().
        if hasattr(callers, '__iter__'):
            callers = map(self._prepare, callers)
        else:
            callers = self._prepare(callers)
        return super(_SlackerCallbackDispatcher, self).call(callers)

    def _prepare(self, func):
        # Postponed/Slacker objects become async-wrapped proceed() calls, so
        # they can be yielded directly without calling adisp.async manually.
        if isinstance(func, (Postponed, Slacker)):
            return async(func.proceed)()
        return func
def process(func):
    """Decorator: drive the generator function *func* with a callback dispatcher.

    The wrapped function returns None; the dispatcher consumes the generator.
    """
    from functools import wraps

    # BUG fix: the wrapper previously lost the wrapped function's metadata
    # (__name__, __doc__, __module__); functools.wraps preserves it.
    @wraps(func)
    def wrapper(*args, **kwargs):
        _SlackerCallbackDispatcher(func(*args, **kwargs))
    return wrapper
| {
"content_hash": "4c7233841fd8292fe3fc2f7b9846cf1d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.6797687861271676,
"repo_name": "kmike/tornado-slacker",
"id": "a48478d0dbb78047b94b485a23e255bf2f5f9098",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slacker/adisp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23518"
}
],
"symlink_target": ""
} |
"""@file crossentropy_multi_loss.py
contains the CrossEntropyMultiLoss"""
import tensorflow as tf
import loss_computer
from nabu.neuralnetworks.components import ops
class CrossEntropyMultiLoss(loss_computer.LossComputer):
    """A loss computer that calculates the loss"""

    def __call__(self, targets, logits, seq_length=None):
        """
        Compute the loss

        Creates the operation to compute the crossentropy multi loss

        Args:
            targets: a dictionary of [batch_size x ... x ...] tensor containing
                the targets
            logits: a dictionary of [batch_size x ... x ...] tensors containing the logits

        Returns:
            loss: a scalar value containing the loss
            norm: a scalar value indicating how to normalize the loss
        """
        # Config values arrive as strings; both 'true' and 'True' enable.
        if 'av_anchors_time_flag' in self.lossconf and self.lossconf['av_anchors_time_flag'] in ['true', 'True']:
            av_anchors_time_flag = True
        else:
            av_anchors_time_flag = False
        if 'resh_logits' in self.lossconf and self.lossconf['resh_logits'] in ['true', 'True']:
            resh_logits = True
        else:
            resh_logits = False
        # NOTE(review): unlike the two flags above, this one defaults to True
        # when absent and only the exact string 'True' keeps it enabled
        # ('true' would disable it) — confirm that asymmetry is intended.
        if 'allow_permutation' not in self.lossconf or self.lossconf['allow_permutation'] == 'True':
            allow_permutation = True
        else:
            allow_permutation = False
        spkids = targets['spkids']
        logits = logits['spkest']
        if av_anchors_time_flag:
            # Average the anchor estimates over axis 1 (the time axis,
            # per the flag's name — TODO confirm tensor layout).
            logits = tf.reduce_mean(logits, 1)
        if resh_logits:
            # Reshape flat logits to [batch_size, nr_speakers, -1].
            nrS = spkids.get_shape()[1]
            logits = tf.reshape(logits, [self.batch_size, nrS, -1])
        loss, norm = ops.crossentropy_multi_loss(spkids, logits, self.batch_size, allow_permutation=allow_permutation)
        return loss, norm
| {
"content_hash": "cc6f4552ec1e856ba8fc696b477aaabc",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 112,
"avg_line_length": 29.14814814814815,
"alnum_prop": 0.7058449809402796,
"repo_name": "JeroenZegers/Nabu-MSSS",
"id": "bb4baad0aff5417a075ffb95dfa8a49ed845ef47",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/neuralnetworks/loss_computers/crossentropy_multi_loss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "981104"
},
{
"name": "Shell",
"bytes": "4125"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.template import loader
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from argus.models import (Group, Transaction, Party, Share, Category,
URL_SAFE_CHARS)
from argus.tokens import token_generators
class PartyForm(forms.ModelForm):
    """Form for a Party's name; model and fields come from modelformset_factory."""

    class Meta:
        # Render the name field as a plain text input.
        widgets = {
            'name': forms.TextInput,
        }
class BaseGroupCreateFormSet(BaseModelFormSet):
    """Formset that creates a new Group from at least two member names."""

    def clean(self):
        """Reject the formset unless at least two forms were filled in."""
        super(BaseGroupCreateFormSet, self).clean()
        filled = sum([1 if form.cleaned_data else 0
                      for form in self.forms])
        if filled < 2:
            raise forms.ValidationError("Please enter the name of at least "
                                        "two members to get started.")

    def save(self):
        """Create a Group with a unique random slug and save its members.

        Each filled form becomes a MEMBER Party of the new group; a default
        Category is created and attached. Returns the new Group.
        """
        # Retry random slugs until one that is not yet taken is found.
        while True:
            slug = get_random_string(length=6, allowed_chars=URL_SAFE_CHARS)
            if not Group.objects.filter(slug=slug).exists():
                group = Group.objects.create(slug=slug)
                category = Category.objects.create(name=Category.DEFAULT_NAME,
                                                   group=group)
                group.default_category = category
                group.save()
                break
        for form in self.forms:
            form.instance.group = group
            form.instance.party_type = Party.MEMBER
            # Skip forms that were left empty.
            if form.instance.name:
                form.save()
        return group
    # Tell Django templates that calling save() has side effects.
    save.alters_data = True
# Formset used on the group-creation page: name-only rows, at least one,
# no extra blank forms beyond the minimum.
GroupCreateFormSet = modelformset_factory(
    Party,
    form=PartyForm,
    formset=BaseGroupCreateFormSet,
    extra=0,
    min_num=1,
    fields=('name',))
class GroupSlugInput(forms.SlugInput):
    """Slug input whose HTML pattern attribute enforces the Group slug regex."""

    def get_context(self, name, value, attrs):
        ctx = super(GroupSlugInput, self).get_context(name, value, attrs)
        # Client-side validation mirroring the server-side slug format.
        ctx['attrs']['pattern'] = Group.SLUG_REGEX
        return ctx
class GroupForm(forms.ModelForm):
    """Edit a Group's settings; emails a confirmation link if the address changes."""
    subject_template_name = "argus/mail/group_email_confirm_subject.txt"
    body_template_name = "argus/mail/group_email_confirm_body.txt"
    html_email_template_name = None
    # Token generator used to build the email-confirmation link.
    generator = token_generators['email_confirm']

    class Meta:
        model = Group
        exclude = ('password', 'confirmed_email', 'created',)
        widgets = {
            'slug': GroupSlugInput,
            'name': forms.TextInput,
            'email': forms.EmailInput,
            'currency': forms.TextInput,
            'default_category': forms.Select,
        }

    def __init__(self, request, *args, **kwargs):
        # The request is kept to build absolute URLs in the confirmation mail.
        self.request = request
        super(GroupForm, self).__init__(*args, **kwargs)
        # Only this group's own categories are valid defaults.
        self.fields['default_category'].queryset = self.instance.categories.all()
        self.fields['default_category'].empty_label = None
        self.fields['default_category'].required = True

    def save(self, *args, **kwargs):
        """Save the group; on email change, mail a confirmation link."""
        instance = super(GroupForm, self).save(*args, **kwargs)
        if 'email' in self.changed_data:
            # Send confirmation link.
            context = {
                'group': instance,
                'email': instance.email,
                'site': get_current_site(self.request),
                'token': self.generator.make_token(instance),
                'protocol': 'https' if self.request.is_secure() else 'http',
            }
            from_email = settings.DEFAULT_FROM_EMAIL
            subject = loader.render_to_string(self.subject_template_name,
                                              context)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            body = loader.render_to_string(self.body_template_name, context)
            if self.html_email_template_name:
                html_email = loader.render_to_string(self.html_email_template_name,
                                                     context)
            else:
                html_email = None
            send_mail(subject, body, from_email, [instance.email],
                      html_message=html_email)
        return instance
class GroupAuthenticationForm(forms.Form):
    """Password prompt used to unlock a password-protected Group.

    On successful validation the authenticated group is exposed as
    ``self.group``.
    """
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, group, request, *args, **kwargs):
        self._group = group
        self.request = request
        super(GroupAuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        password = self.cleaned_data.get('password')
        if password:
            # Guard clause: reject a wrong password outright.
            if not self._group.check_password(password):
                raise forms.ValidationError("Incorrect password.")
            self.group = self._group
        return self.cleaned_data
class GroupChangePasswordForm(forms.ModelForm):
    """Set a new password for a Group, verifying the old one when it exists."""
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
        'password_incorrect': _("Your old password was entered incorrectly. "
                                "Please enter it again."),
    }
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)
    new_password1 = forms.CharField(label=_("New password"),
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"),
                                    widget=forms.PasswordInput)

    class Meta:
        model = Group
        fields = ()

    def __init__(self, *args, **kwargs):
        super(GroupChangePasswordForm, self).__init__(*args, **kwargs)
        # Groups that never set a password are not asked for the old one.
        if not self.instance.password:
            del self.fields['old_password']

    def clean_old_password(self):
        """
        Validates that the old_password field is correct (if present).
        """
        old_password = self.cleaned_data["old_password"]
        if not self.instance.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'],
                code='password_incorrect',
            )
        return old_password

    def clean_new_password2(self):
        # Both new-password entries must match.
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        return password2

    def save(self, commit=True):
        """Hash and store the new password; persist when commit is True."""
        self.instance.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.instance.save()
        return self.instance
class GroupRelatedForm(forms.ModelForm):
    """Base ModelForm for objects owned by a Group.

    The owning group is supplied at construction time and attached to the
    model instance after validation, so it never appears as a form field.
    """

    class Meta:
        exclude = ('group',)
        widgets = {
            'name': forms.TextInput,
        }

    def __init__(self, group, *args, **kwargs):
        super(GroupRelatedForm, self).__init__(*args, **kwargs)
        self.group = group

    def _post_clean(self):
        super(GroupRelatedForm, self)._post_clean()
        # Stamp ownership onto the instance right after model validation.
        self.instance.group = self.group
class TransactionForm(forms.ModelForm):
    """Create or edit a Transaction, including its per-member shares.

    Adds one DecimalField per group member (named ``member<pk>``) so manual
    amount/percent splits can be entered alongside the model fields.
    """
    # NOTE(review): ModelMultipleChoiceField normally takes a queryset; the
    # model class is passed here and replaced with a real queryset in
    # __init__ — confirm this construction works on the form library in use.
    sharers = forms.ModelMultipleChoiceField(Party)

    class Meta:
        model = Transaction
        widgets = {
            'paid_by': forms.Select,
            'paid_to': forms.Select,
            'memo': forms.TextInput,
            'amount': forms.NumberInput(attrs={'step': 0.01}),
            'paid_at': forms.DateTimeInput,
            'category': forms.Select,
            'notes': forms.Textarea,
            'split': forms.RadioSelect,
            'sharers': forms.CheckboxSelectMultiple,
        }

    def __init__(self, group, *args, **kwargs):
        super(TransactionForm, self).__init__(*args, **kwargs)
        self.group = group
        # Restrict choices to this group's categories and parties.
        self.fields['category'].queryset = group.categories.all()
        self.fields['category'].initial = group.default_category_id
        self.members = group.parties.filter(party_type=Party.MEMBER)
        self.fields['paid_by'].queryset = self.members
        self.fields['paid_by'].empty_label = None
        self.fields['paid_to'].queryset = group.parties.all()
        self.fields['sharers'].queryset = self.members
        self.initial['sharers'] = self.members
        # One manual-share field per member; every share defaults to zero.
        for member in self.members:
            field = forms.DecimalField(decimal_places=2, min_value=0,
                                       initial=0, label=member.name)
            field.member = member
            field.widget.attrs['step'] = 0.01
            self.fields['member{}'.format(member.pk)] = field
        # When editing a manual split, pre-fill shares (numerators are
        # stored in hundredths, hence the division by 100).
        if self.instance.pk and self.instance.is_manual():
            for share in self.instance.shares.all():
                field = 'member{}'.format(share.party_id)
                self.fields[field].initial = float(share.numerator) / 100

    @property
    def member_fields(self):
        # Bound per-member share fields, in member order, for templates.
        for member in self.members:
            yield self['member{}'.format(member.pk)]

    def clean(self):
        """Cross-field validation for each split type."""
        cleaned_data = super(TransactionForm, self).clean()
        split = cleaned_data['split']
        if cleaned_data['paid_by'] == cleaned_data['paid_to']:
            raise forms.ValidationError("A party cannot pay themselves.")
        if split == Transaction.SIMPLE:
            if not cleaned_data.get('paid_to'):
                raise forms.ValidationError("Simple transactions must be paid "
                                            "to someone.")
        elif split == Transaction.EVEN:
            if cleaned_data['paid_to'] in cleaned_data['sharers']:
                raise forms.ValidationError("A member cannot share in a "
                                            "payment to themselves.")
        else:
            # Manual splits (percent or amount).
            if cleaned_data.get('member{}'.format(cleaned_data['paid_to'].pk)):
                raise forms.ValidationError("A member cannot share in a "
                                            "payment to themselves.")
            amounts = [cleaned_data['member{}'.format(member.pk)]
                       for member in self.members
                       if cleaned_data['member{}'.format(member.pk)]]
            cleaned_total = sum(amounts)
            if split == Transaction.PERCENT:
                if cleaned_total != 100:
                    raise forms.ValidationError("Percentages must add up to "
                                                "100.00%.")
            if split == Transaction.AMOUNT:
                if cleaned_total != cleaned_data['amount']:
                    raise forms.ValidationError("Share amounts must add up to "
                                                "total cost.")
        return cleaned_data

    def save(self):
        """Save the transaction and rebuild its Share rows."""
        created = not self.instance.pk
        instance = super(TransactionForm, self).save()
        # Editing replaces all existing shares with freshly computed ones.
        if not created:
            instance.shares.all().delete()
        if instance.split == Transaction.SIMPLE:
            if not instance.paid_to.is_member():
                Share.objects.create(
                    transaction=instance,
                    party=instance.paid_by,
                    numerator=1,
                    denominator=1,
                    amount=instance.amount)
        elif instance.split == Transaction.EVEN:
            shares = [(member, 1) for member in self.cleaned_data['sharers']]
            Share.objects.create_split(instance, shares)
        else:
            cd = self.cleaned_data
            # Manual splits: numerators stored in hundredths.
            member_numerators = [(member, cd['member{}'.format(member.pk)] * 100)
                                 for member in self.members
                                 if cd['member{}'.format(member.pk)]]
            Share.objects.create_split(instance, member_numerators)
        return instance
| {
"content_hash": "43fb022788dd8856c08b216de34bb9ab",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 83,
"avg_line_length": 38.81818181818182,
"alnum_prop": 0.5663265306122449,
"repo_name": "littleweaver/django-argus",
"id": "97e48fc68bf25882512f9f131c675d0a9f321ad9",
"size": "11956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argus/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178980"
},
{
"name": "JavaScript",
"bytes": "85374"
},
{
"name": "Python",
"bytes": "56575"
},
{
"name": "Ruby",
"bytes": "420"
}
],
"symlink_target": ""
} |
"""
Platform for the Daikin AC.
For more details about this component, please refer to the documentation
https://home-assistant.io/components/daikin/
"""
import asyncio
from datetime import timedelta
import logging
from socket import timeout
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOSTS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import Throttle
from . import config_flow # noqa pylint_disable=unused-import
from .const import KEY_HOST
REQUIREMENTS = ['pydaikin==0.9']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'daikin'
# Minimum interval between device polls (see DaikinApi.update throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# Platforms forwarded for each config entry.
COMPONENT_TYPES = ['climate', 'sensor']
# YAML configuration: an optional list of Daikin unit host addresses.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(
            CONF_HOSTS, default=[]
        ): vol.All(cv.ensure_list, [cv.string]),
    })
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Establish connection with Daikin.

    Imports YAML-configured hosts into config entries; with no hosts
    configured, starts the interactive (discovery) flow instead.
    """
    # Bug fix: the original used ``config.SOURCE_IMPORT`` — but ``config``
    # is the YAML configuration dict, which has no such attribute, so every
    # flow initialization raised AttributeError.  The constant lives in
    # homeassistant.config_entries.
    from homeassistant.config_entries import SOURCE_IMPORT

    if DOMAIN not in config:
        return True
    hosts = config[DOMAIN].get(CONF_HOSTS)
    if not hosts:
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={'source': SOURCE_IMPORT}))
    for host in hosts:
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={'source': SOURCE_IMPORT},
                data={
                    KEY_HOST: host,
                }))
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Establish connection with Daikin."""
    conf = entry.data
    # Build the shared API handle; abort setup if the unit is unreachable.
    daikin_api = await daikin_api_setup(hass, conf[KEY_HOST])
    if not daikin_api:
        return False
    hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: daikin_api})
    # Forward the entry to every supported platform (climate, sensor).
    await asyncio.wait([
        hass.config_entries.async_forward_entry_setup(entry, component)
        for component in COMPONENT_TYPES
    ])
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, component)
        for component in COMPONENT_TYPES
    ]
    await asyncio.wait(unload_tasks)
    # Drop this entry's API handle; remove the domain bucket once empty.
    hass.data[DOMAIN].pop(config_entry.entry_id)
    if not hass.data[DOMAIN]:
        hass.data.pop(DOMAIN)
    return True
async def daikin_api_setup(hass, host):
    """Create a Daikin instance only once.

    Returns a DaikinApi handle, or None if the device cannot be created.
    """
    from pydaikin.appliance import Appliance
    try:
        with async_timeout.timeout(10):
            # Appliance does blocking network I/O, so run it in the executor.
            device = await hass.async_add_executor_job(Appliance, host)
    except asyncio.TimeoutError:
        _LOGGER.error("Connection to Daikin could not be established")
        return None
    except Exception:  # pylint: disable=broad-except
        # NOTE(review): broad catch discards the actual failure; consider
        # _LOGGER.exception here to keep the traceback.
        _LOGGER.error("Unexpected error creating device")
        return None
    name = device.values['name']
    api = DaikinApi(device, name)
    return api
class DaikinApi:
    """Keep the Daikin instance in one place and centralize the update."""

    def __init__(self, device, name):
        """Initialize the Daikin Handle.

        device -- pydaikin Appliance instance.
        name -- friendly name reported by the unit.
        """
        self.device = device
        self.name = name
        self.ip_address = device.ip

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self, **kwargs):
        """Pull the latest data from Daikin."""
        try:
            self.device.update_status()
        except timeout:
            _LOGGER.warning(
                "Connection failed for %s", self.ip_address
            )

    @property
    def mac(self):
        """Return mac-address of device."""
        return self.device.values.get(CONNECTION_NETWORK_MAC)

    @property
    def device_info(self):
        """Return a device description for device registry."""
        info = self.device.values
        # Robustness fix: 'ver' may be absent from the device values;
        # the original crashed with AttributeError on None.
        sw_version = info.get('ver')
        if sw_version is not None:
            sw_version = sw_version.replace('_', '.')
        return {
            'connections': {(CONNECTION_NETWORK_MAC, self.mac)},
            # Bug fix: the key was misspelled 'identifieres', so the device
            # registry silently ignored it.  Registry identifiers are a set
            # of (domain, unique_id) tuples.
            'identifiers': {(DOMAIN, self.mac)},
            'manufacturer': 'Daikin',
            'model': info.get('model'),
            'name': info.get('name'),
            'sw_version': sw_version,
        }
| {
"content_hash": "77ced524e44570745d18984cc9da78c9",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 29.45205479452055,
"alnum_prop": 0.6404651162790698,
"repo_name": "tinloaf/home-assistant",
"id": "86ad6c0a1601ece94146953c76e8c310051f000f",
"size": "4300",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/daikin/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
"""
Created on Wed Dec 3 15:01:34 2014
@author: Matti Ropo
@author: Henrik Levämäki
"""
from pyemto.utilities.utils import *
| {
"content_hash": "2903e0639d50a0f23cd6ead5e17c6fb6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 36,
"avg_line_length": 16,
"alnum_prop": 0.7109375,
"repo_name": "hpleva/pyemto",
"id": "1ac0a5832ed3dfc0109f28ef1870098664b3c5a8",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyemto/utilities/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "343491"
},
{
"name": "Python",
"bytes": "583901"
}
],
"symlink_target": ""
} |
"""Gauge watcher implementations."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from faucet.valve_util import dpid_log
from faucet.gauge_influx import GaugePortStateInfluxDBLogger, GaugePortStatsInfluxDBLogger, GaugeFlowTableInfluxDBLogger
from faucet.gauge_nsodbc import GaugeFlowTableDBLogger
from faucet.gauge_pollers import GaugePortStateBaseLogger, GaugePortStatsPoller, GaugeFlowTablePoller
from faucet.gauge_prom import GaugePortStatsPrometheusPoller
def watcher_factory(conf):
    """Return a Gauge watcher class based on watcher type and DB type.

    Args:
        conf: watcher configuration object providing ``type`` (one of
            'port_state', 'port_stats', 'flow_table') and ``db_type``
            ('text', 'influx', 'prometheus', 'gaugedb').
    Returns:
        The matching watcher class, or None for an unknown combination.
    """
    # Fix: the original docstring documented a nonexistent ``gauge_conf``
    # argument; also a lowercase local name per PEP 8 (it is not module
    # level) and dict.get chaining instead of a manual membership test.
    watcher_types = {
        'port_state': {
            'text': GaugePortStateLogger,
            'influx': GaugePortStateInfluxDBLogger,
        },
        'port_stats': {
            'text': GaugePortStatsLogger,
            'influx': GaugePortStatsInfluxDBLogger,
            'prometheus': GaugePortStatsPrometheusPoller,
        },
        'flow_table': {
            'text': GaugeFlowTableLogger,
            'gaugedb': GaugeFlowTableDBLogger,
            'influx': GaugeFlowTableInfluxDBLogger,
        },
    }
    return watcher_types.get(conf.type, {}).get(conf.db_type)
def _rcv_time(rcv_time):
return time.strftime('%b %d %H:%M:%S', time.localtime(rcv_time))
class GaugePortStateLogger(GaugePortStateBaseLogger):
    """Abstraction for port state logger."""

    def update(self, rcv_time, dp_id, msg):
        # Translate the OpenFlow port-status reason into a readable message.
        rcv_time_str = _rcv_time(rcv_time)
        reason = msg.reason
        port_no = msg.desc.port_no
        ofp = msg.datapath.ofproto
        # Default message covers reasons not matched below.
        log_msg = 'port %s unknown state %s' % (port_no, reason)
        if reason == ofp.OFPPR_ADD:
            log_msg = 'port %s added' % port_no
        elif reason == ofp.OFPPR_DELETE:
            log_msg = 'port %s deleted' % port_no
        elif reason == ofp.OFPPR_MODIFY:
            link_down = (msg.desc.state & ofp.OFPPS_LINK_DOWN)
            if link_down:
                log_msg = 'port %s down' % port_no
            else:
                log_msg = 'port %s up' % port_no
        log_msg = '%s %s' % (dpid_log(dp_id), log_msg)
        self.logger.info(log_msg)
        # Optionally mirror the message to a log file, tab-separated.
        if self.conf.file:
            with open(self.conf.file, 'a') as logfile:
                logfile.write('\t'.join((rcv_time_str, log_msg)) + '\n')

    @staticmethod
    def send_req():
        """Send a stats request to a datapath."""
        # Port state is event driven; there is nothing to poll.
        raise NotImplementedError

    @staticmethod
    def no_response():
        """Called when a polling cycle passes without receiving a response."""
        # Not applicable: this watcher never polls.
        raise NotImplementedError
class GaugePortStatsLogger(GaugePortStatsPoller):
    """Abstraction for port statistics logger."""

    @staticmethod
    def _update_line(rcv_time_str, stat_name, stat_val):
        # One tab-separated log line: time, stat name, value.
        return '\t'.join((rcv_time_str, stat_name, str(stat_val))) + '\n'

    def update(self, rcv_time, dp_id, msg):
        super(GaugePortStatsLogger, self).update(rcv_time, dp_id, msg)
        rcv_time_str = _rcv_time(rcv_time)
        for stat in msg.body:
            port_name = self._stat_port_name(msg, stat, dp_id)
            with open(self.conf.file, 'a') as logfile:
                log_lines = []
                # One line per stat, keyed "<datapath>-<port>-<stat>".
                for stat_name, stat_val in self._format_port_stats('-', stat):
                    dp_port_name = '-'.join((
                        self.dp.name, port_name, stat_name))
                    log_lines.append(
                        self._update_line(
                            rcv_time_str, dp_port_name, stat_val))
                logfile.writelines(log_lines)
class GaugeFlowTableLogger(GaugeFlowTablePoller):
    """Periodically dumps the current datapath flow table as a yaml object.

    Includes a timestamp and a reference ($DATAPATHNAME-flowtables). The
    flow table is dumped as an OFFlowStatsReply message (in yaml format) that
    matches all flows.
    """

    def update(self, rcv_time, dp_id, msg):
        super(GaugeFlowTableLogger, self).update(rcv_time, dp_id, msg)
        rcv_time_str = _rcv_time(rcv_time)
        jsondict = msg.to_jsondict()
        with open(self.conf.file, 'a') as logfile:
            ref = '-'.join((self.dp.name, 'flowtables'))
            # The payload is serialized with json.dumps; JSON is a subset
            # of YAML, so the documented "yaml" framing still holds.
            logfile.write(
                '\n'.join((
                    '---',
                    'time: %s' % rcv_time_str,
                    'ref: %s' % ref,
                    'msg: %s' % json.dumps(jsondict, indent=4))))
| {
"content_hash": "c524001ed9ee85088819bf99c63d9889",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 120,
"avg_line_length": 37.111111111111114,
"alnum_prop": 0.6175149700598802,
"repo_name": "byllyfish/faucet",
"id": "7e3f22b8fba9fce2af20e494c654712aef985bb0",
"size": "5344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faucet/watcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2931"
},
{
"name": "Python",
"bytes": "681126"
},
{
"name": "Shell",
"bytes": "9269"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: adds the Author.slug column.

    Do not hand-edit the ``models`` freeze below; it is generated output.
    """

    def forwards(self, orm):
        # Adding field 'Author.slug'
        db.add_column(u'articles_author', 'slug',
                      self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, blank=True, populate_from=None, unique=True, null=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Author.slug'
        db.delete_column(u'articles_author', 'slug')

    models = {
        u'accounts.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
        },
        u'articles.article': {
            'Meta': {'object_name': 'Article'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Author']"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['taxonomy.ContentCategory']", 'symmetrical': 'False'}),
            'content_detail': ('ckeditor.fields.RichTextField', [], {}),
            'content_list': ('ckeditor.fields.RichTextField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'meta_description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'populate_from': "'title'", 'unique': 'True', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
        },
        u'articles.author': {
            'Meta': {'object_name': 'Author'},
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'bio': ('ckeditor.fields.RichTextField', [], {}),
            'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'facebook': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'googleplus': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'linkedin': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'populate_from': 'None', 'unique': 'True', 'null': 'True'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'youtube': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'taxonomy.contentcategory': {
            'Meta': {'object_name': 'ContentCategory'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name'", 'blank': 'True'})
        }
    }

    complete_apps = ['articles']
"content_hash": "eafc5ece711b61e2225ce83c8935f31e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 194,
"avg_line_length": 75.32631578947368,
"alnum_prop": 0.553381777529346,
"repo_name": "AmandaCMS/amanda-cms",
"id": "86be4bdb8ff661ec9152c281c6eedd261592cb0e",
"size": "7180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amanda/articles/migrations/0007_auto__add_field_author_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "456"
},
{
"name": "Python",
"bytes": "311130"
}
],
"symlink_target": ""
} |
import logging
from random import shuffle
from unittest.mock import MagicMock
# # TODO? Stop mocking (See blame commit message and possibly PR for
# # discussion.)
# from aqt import mw
mw = MagicMock()
from mamba import describe, it, context
from crowd_anki.config.config_settings import ConfigSettings, NoteSortingMethods
from crowd_anki.export.note_sorter import NoteSorter
test_guids = ["abc", "bcd", "cde", "def", "efg", "fgh"]
test_flags = [0, 1, 2, 3, 4, 5]
test_tags = ["adjectives", "directions", "interesting", "nouns", "verbs", "zzzzFinal"]
test_notemodels = ["Default", "LL Noun", "LL Sentence", "LL Verb", "LL Word", "Zulu"]
test_notemodelids = test_guids
test_fields = test_tags
note_sorting_single_result_pairs = [
(NoteSortingMethods.GUID, test_guids),
(NoteSortingMethods.FLAG, test_flags),
(NoteSortingMethods.TAG, test_tags),
(NoteSortingMethods.NOTE_MODEL_NAME, test_notemodels),
(NoteSortingMethods.NOTE_MODEL_ID, test_notemodelids),
(NoteSortingMethods.FIELD1, test_fields),
(NoteSortingMethods.FIELD2, test_fields)
]
test_multikey_notemodel_guid = [(notemodel, guid) for notemodel in test_notemodels for guid in test_guids]
class NoteSorterTester:
    """Test harness: builds shuffled mock notes and sorts them via NoteSorter."""

    def __init__(self):
        self.note_sorter = None
        self.notes = []
        self.sorted_notes = []
        self.config = ConfigSettings(mw.addonManager, None, mw.pm)

    @staticmethod
    def get_single_note_mock(i):
        """Return a mock note whose sortable attributes come from fixture index *i*."""
        note = MagicMock()
        note.anki_object.guid = test_guids[i]
        note.anki_object.flags = test_flags[i]
        note.anki_object.tags = test_tags[i]
        note.anki_object._model = {
            "name": test_notemodels[i],
            "crowdanki_uuid": test_notemodelids[i]
        }
        note.anki_object.fields = [test_fields[i], test_fields[i]]
        return note

    @staticmethod
    def get_multikey_note_mock(i):
        """Return a mock note carrying the (notemodel, guid) pair at index *i*."""
        note = MagicMock()
        note.anki_object.guid = test_multikey_notemodel_guid[i][1]
        note.anki_object._model = {
            "name": test_multikey_notemodel_guid[i][0]
        }
        return note

    def setup_notes(self, is_multi_key: bool):
        """Populate self.notes with the fixture notes in random order."""
        random_range = list(range(0, len(test_multikey_notemodel_guid if is_multi_key else test_guids)))
        shuffle(random_range)
        if is_multi_key:
            notes_list = [self.get_multikey_note_mock(i) for i in random_range]
        else:
            notes_list = [self.get_single_note_mock(i) for i in random_range]
        # Bug fix: the original passed notes_list as a lazy %-argument with
        # no placeholder in the format string, which makes the logging
        # module raise (and swallow) a formatting error instead of logging
        # the shuffled list.
        logging.info("Shuffled list: %s", notes_list)
        self.notes = notes_list

    def sort_with(self, sort_methods, reverse_sort, is_multi_key=False):
        """Shuffle fresh notes, configure sorting, and record the sorted result."""
        self.setup_notes(is_multi_key)
        self.config.export_note_sort_methods = sort_methods
        self.config.export_notes_reverse_order = reverse_sort
        self.note_sorter = NoteSorter(self.config)
        self.sorted_notes = self.note_sorter.sort_notes(self.notes)
# Mamba specs: each `it` shuffles fresh mock notes, sorts them with the
# configured method(s), and asserts the resulting key order matches the
# fixture lists (optionally reversed).
with describe(NoteSorterTester) as self:
    with context("user sorts by each sort option"):
        with it("do not sort / sort by none"):
            self.tester = NoteSorterTester()
            self.tester.sort_with([NoteSortingMethods.NO_SORTING.value], False)
            assert (self.tester.sorted_notes == self.tester.notes)
        with it("do not sort / sort by none, reversed"):
            self.tester = NoteSorterTester()
            self.tester.sort_with([NoteSortingMethods.NO_SORTING.value], True)
            assert (self.tester.sorted_notes == list(reversed(self.tester.notes)))
        with it("sorts by all sorting methods"):
            # Extract each note's sort key back out and compare with fixtures.
            for method, result in note_sorting_single_result_pairs:
                self.tester = NoteSorterTester()
                self.tester.sort_with([method], False)
                assert ([NoteSorter.sorting_definitions[method](note) for note in self.tester.sorted_notes] == result)
        with it("sorts by all single sorting methods, reversed"):
            for method, result in note_sorting_single_result_pairs:
                self.tester = NoteSorterTester()
                self.tester.sort_with([method], True)
                assert ([NoteSorter.sorting_definitions[method](note) for note in self.tester.sorted_notes
                         ] == list(reversed(result)))
        with it("sorts by two sorting methods, notemodels+guids"):
            self.tester = NoteSorterTester()
            self.tester.sort_with([NoteSortingMethods.NOTE_MODEL_NAME, NoteSortingMethods.GUID], False,
                                  is_multi_key=True)
            # Rebuild (notemodel, guid) pairs from the sorted notes.
            return_object = [
                (NoteSorter.sorting_definitions[NoteSortingMethods.NOTE_MODEL_NAME](note),
                 NoteSorter.sorting_definitions[NoteSortingMethods.GUID](note))
                for note in self.tester.sorted_notes
            ]
            assert (return_object == test_multikey_notemodel_guid)
        with it("sorts by two sorting methods, notemodels+guids, reversed"):
            self.tester = NoteSorterTester()
            self.tester.sort_with([NoteSortingMethods.NOTE_MODEL_NAME, NoteSortingMethods.GUID], True,
                                  is_multi_key=True)
            return_object = [
                (NoteSorter.sorting_definitions[NoteSortingMethods.NOTE_MODEL_NAME](note),
                 NoteSorter.sorting_definitions[NoteSortingMethods.GUID](note))
                for note in self.tester.sorted_notes
            ]
            assert (return_object == list(reversed(test_multikey_notemodel_guid)))
| {
"content_hash": "50e2d216dec33fa2251b0aa1ec4bbe89",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 118,
"avg_line_length": 36.72666666666667,
"alnum_prop": 0.6331457614812126,
"repo_name": "Stvad/CrowdAnki",
"id": "859798b2190891cd4f05302ea324c4f0a6d481a2",
"size": "5509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/export/note_sorter_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "152637"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ..preprocess import Normalize12
def test_Normalize12_inputs():
    """Check the auto-generated trait metadata of Normalize12's input spec."""
    # Expected metadata per input trait (generated table; do not hand-edit).
    input_map = dict(affine_regularization_type=dict(field='eoptions.affreg',
    ),
    apply_to_files=dict(copyfile=True,
    field='subj.resample',
    ),
    bias_fwhm=dict(field='eoptions.biasfwhm',
    ),
    bias_regularization=dict(field='eoptions.biasreg',
    ),
    deformation_file=dict(copyfile=False,
    field='subj.def',
    mandatory=True,
    xor=['image_to_align', 'tpm'],
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    image_to_align=dict(copyfile=True,
    field='subj.vol',
    mandatory=True,
    xor=['deformation_file'],
    ),
    jobtype=dict(usedefault=True,
    ),
    matlab_cmd=dict(),
    mfile=dict(usedefault=True,
    ),
    out_prefix=dict(field='woptions.prefix',
    usedefault=True,
    ),
    paths=dict(),
    sampling_distance=dict(field='eoptions.samp',
    ),
    smoothness=dict(field='eoptions.fwhm',
    ),
    tpm=dict(copyfile=False,
    field='eoptions.tpm',
    xor=['deformation_file'],
    ),
    use_mcr=dict(),
    use_v8struct=dict(min_ver='8',
    usedefault=True,
    ),
    warping_regularization=dict(field='eoptions.reg',
    ),
    write_bounding_box=dict(field='woptions.bb',
    ),
    write_interp=dict(field='woptions.interp',
    ),
    write_voxel_sizes=dict(field='woptions.vox',
    ),
    )
    inputs = Normalize12.input_spec()

    # Every expected metadata key/value must match the declared traits.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_Normalize12_outputs():
    """Check that Normalize12's output spec declares the expected traits."""
    expected = {
        'deformation_field': {},
        'normalized_files': {},
        'normalized_image': {},
    }
    spec = Normalize12.output_spec()
    for trait_name, metadata in expected.items():
        for meta_key, meta_value in metadata.items():
            assert getattr(spec.traits()[trait_name], meta_key) == meta_value
| {
"content_hash": "c22b84d6aa7bff02426399adf5979082",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 26.66216216216216,
"alnum_prop": 0.633046122655854,
"repo_name": "mick-d/nipype",
"id": "9d537e34b11ec4b6cf91494ca23b6278d98c182b",
"size": "2027",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/interfaces/spm/tests/test_auto_Normalize12.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext import declarative
from nca47.objects import attributes as attr
class Nca47Base(models.ModelBase,
                models.SoftDeleteMixin,
                models.ModelIterator):
    """Base class for Nca47 Models.

    Combines oslo.db's model base with soft-delete support and Python
    iteration over the model's columns.
    """

    __table_args__ = {'mysql_engine': 'InnoDB'}

    @declarative.declared_attr
    def __tablename__(cls):
        """
        Use the pluralized name of the class as the table.

        eg. If class name is DnsServer, return dns_servers.
        """
        # Insert '_' before each capital, strip the leading one, lowercase,
        # then pluralize with a trailing 's'.
        cls_name_list = ['_' + s if s.isupper() else s for s in cls.__name__]
        return ''.join(cls_name_list).lstrip('_').lower() + 's'

    def __repr__(self):
        """sqlalchemy based automatic __repr__ method."""
        items = ['%s=%r' % (col.name, getattr(self, col.name))
                 for col in self.__table__.columns]
        return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__,
                                               self.__class__.__name__,
                                               id(self), ', '.join(items))

    def next(self):
        """Python 2 iterator protocol shim delegating to __next__().

        Bug fix: the previous implementation called ``self.__next__()`` but
        discarded its result, so Python 2 style iteration always got None.
        """
        return self.__next__()

    def soft_delete(self, session):
        """Mark this object as deleted (does not remove the row)."""
        self.deleted = True
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
# Declarative base shared by every nca47 model; each model thereby inherits
# the soft-delete / iteration / naming behaviour defined on Nca47Base.
BASE = declarative.declarative_base(cls=Nca47Base)
class HasTenant(object):
    """Mixin adding an indexed ``tenant_id`` column to a model."""

    tenant_id = sa.Column(
        sa.String(attr.TENANT_ID_MAX_LEN), index=True)
class HasId(object):
    """Mixin adding a UUID primary-key ``id`` column to a model."""

    id = sa.Column(sa.String(attr.UUID_LEN), primary_key=True,
                   default=uuidutils.generate_uuid)
class HasOperationMode(object):
    """Mixin adding an ``operation_fro`` column that defaults to 'AUTO'."""

    operation_fro = sa.Column(sa.String(attr.NAME_MAX_LEN), default='AUTO')
class HasStatus(object):
    """Mixin adding a mandatory 16-character ``status`` column."""

    status = sa.Column(sa.String(16), nullable=False)
| {
"content_hash": "7705e0e0814f380ed46d22a02cbf1c85",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 30.928571428571427,
"alnum_prop": 0.5815242494226328,
"repo_name": "willowd878/nca47",
"id": "38a822eef2aeac8aaa418a13f1008649afa85a20",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nca47/db/sqlalchemy/models/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43669"
}
],
"symlink_target": ""
} |
import py
import pkg_resources
class PyCodeCheckItem(py.test.collect.Item):
    """Test item that runs one registered code checker against a file."""

    def __init__(self, ep, parent):
        py.test.collect.Item.__init__(self, ep.name, parent)
        self._ep = ep

    def runtest(self):
        """Load the checker entry point and fail if it reports errors."""
        capture = py.io.StdCapture()
        checker_module = self._ep.load()
        found_errors, captured_out, captured_err = capture.call(
            checker_module.check_file, self.fspath)
        self.out = captured_out
        self.err = captured_err
        assert not found_errors

    def repr_failure(self, exc_info):
        """On failure, report whatever the checker printed to stdout."""
        return self.out

    def reportinfo(self):
        """Identify this item as a code check in pytest's output."""
        return (self.fspath, -1, "codecheck {0}".format(self._ep.name))
class PyCheckerCollector(py.test.collect.File):
    """Collects one PyCodeCheckItem per installed 'codechecker' plugin."""

    def __init__(self, path, parent):
        super(PyCheckerCollector, self).__init__(path, parent)
        self.name += '[code-check]'

    def collect(self):
        """Yield a check item for every registered entry point."""
        items = []
        for entry_point in pkg_resources.iter_entry_points('codechecker'):
            items.append(PyCodeCheckItem(entry_point, self))
        return items
def pytest_collect_file(path, parent):
    """pytest hook: attach the code-check collector to every ``.py`` file."""
    if path.ext != '.py':
        return None
    return PyCheckerCollector(path, parent)
| {
"content_hash": "8496619b64f601024461c1f482227660",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 26.641025641025642,
"alnum_prop": 0.6227141482194418,
"repo_name": "c0710204/mirrorsBistu",
"id": "f7eb30c3ebf600667a8fc4b733c72094b70aebbe",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypi/bandersnatch/lib/python2.7/site-packages/codecheckers/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4451282"
},
{
"name": "C++",
"bytes": "85589"
},
{
"name": "CSS",
"bytes": "13627"
},
{
"name": "Erlang",
"bytes": "2018"
},
{
"name": "JavaScript",
"bytes": "169379"
},
{
"name": "PHP",
"bytes": "1856746"
},
{
"name": "Perl",
"bytes": "804269"
},
{
"name": "Python",
"bytes": "5211268"
},
{
"name": "Shell",
"bytes": "497398"
},
{
"name": "TeX",
"bytes": "322428"
}
],
"symlink_target": ""
} |
from trayjenkins.event import Event
from pyjenkins.job import JobStatus
class IModel(object):
    """Interface for the status model; methods are documentation-only stubs."""

    def status_changed_event(self):
        """
        Return the event fired when the aggregate job status changes.

        Event arguments: (status:str, message:str)
        @rtype: trayjenkins.event.IEvent
        """
class IView(object):
    """Interface for a status display; methods are documentation-only stubs."""

    def set_status(self, status, message):
        """
        Show the given aggregate status and its explanatory message.

        @type status: str
        @type message: str
        """
class IStatusReader(object):
    """Interface reducing a job list to one status; stub methods only."""

    def status(self, jobs):
        """
        Derive a single overall status from the given jobs.

        @type jobs: [pyjenkins.job.Job]
        @return String from pyjenkins.job.JobStatus
        @rtype: str
        """
class IMessageComposer(object):
    """Interface producing a human-readable summary; stub methods only."""

    def message(self, jobs):
        """
        Compose a short human-readable summary of the given jobs.

        @type jobs: [pyjenkins.job.Job]
        @return Brief message describing the job statuses.
        @rtype: str
        """
class Presenter(object):
    """Glue object that forwards model status changes to the view."""

    def __init__(self, model, view):
        """
        @type model: trayjenkins.status.IModel
        @type view: trayjenkins.status.IView
        """
        self._view = view
        self._model = model
        # Subscribe so every status change is pushed straight to the view.
        change_event = model.status_changed_event()
        change_event.register(self._on_model_status_changed)

    def _on_model_status_changed(self, status, message):
        self._view.set_status(status, message)
class DefaultMessageComposer(IMessageComposer):
    """Builds the human-readable summary shown for a set of jobs."""

    def message(self, jobs):
        """
        @type jobs: [pyjenkins.job.Job]
        @return Brief message describing the job statuses.
        @rtype: str
        """
        if jobs is None:
            return ''
        if not jobs:
            return 'No jobs'
        failing_names = [job.name for job in jobs
                         if job.status == JobStatus.FAILING]
        if failing_names:
            return 'FAILING:\n' + '\n'.join(failing_names)
        return 'All active jobs pass'
class StatusReader(IStatusReader):
    """Reduces a job list to a single overall JobStatus value."""

    def status(self, jobs):
        """
        @type jobs: [pyjenkins.job.Job]
        @return String from pyjenkins.job.JobStatus
        @rtype: str
        """
        if jobs is None:
            return JobStatus.UNKNOWN
        # One failing job makes the whole board FAILING.
        if any(job.status is JobStatus.FAILING for job in jobs):
            return JobStatus.FAILING
        return JobStatus.OK
class Model(IModel):
    """Watches a jobs model and fires an event whenever the aggregate
    status or its message changes."""

    def __init__(self,
                 jobs_model,
                 jobs_filter,
                 message_composer=None,
                 status_reader=None,
                 status_changed_event=None):
        """
        @type jobs_model: trayjenkins.jobs.IModel
        @type jobs_filter: trayjenkins.jobs.IFilter
        @type message_composer: trayjenkins.status.IMessageComposer
        @type status_reader: trayjenkins.status.IStatusReader
        @type status_changed_event: trayjenkins.event.Event
        """
        # Bug fix: the defaults used to be instances created once at
        # function-definition time (e.g. ``status_changed_event=Event()``),
        # so every Model constructed without explicit arguments shared a
        # single Event object and listeners registered through one Model
        # leaked into all the others.  ``None`` sentinels give each Model
        # its own fresh collaborators.
        self._jobs_filter = jobs_filter
        self._message_composer = (DefaultMessageComposer()
                                  if message_composer is None
                                  else message_composer)
        self._status_reader = (StatusReader()
                               if status_reader is None
                               else status_reader)
        self._status_changed_event = (Event()
                                      if status_changed_event is None
                                      else status_changed_event)
        self._lastStatus = JobStatus.UNKNOWN
        self._lastMessage = None
        jobs_model.jobs_updated_event().register(self._on_jobs_updated)

    def _on_jobs_updated(self, job_models):
        """Recompute status/message and fire the event only on change."""
        job_models = self._jobs_filter.filter_jobs(job_models)
        jobs = [model.job for model in job_models]
        status = self._status_reader.status(jobs)
        message = self._message_composer.message(jobs)
        if self._lastStatus != status or self._lastMessage != message:
            self._status_changed_event.fire(status, message)
            self._lastStatus = status
            self._lastMessage = message

    def status_changed_event(self):
        """
        Event arguments: status:str
        @rtype: trayjenkins.event.IEvent
        """
        return self._status_changed_event
| {
"content_hash": "8b0aebd73b13ea1f1538cf374d4d2b58",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 87,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.574468085106383,
"repo_name": "coolhandmook/trayjenkins",
"id": "ee47b9b62137d55548506e7838475460c9885904",
"size": "3901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trayjenkins/status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74341"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
class Www123SeguroComPipeline(object):
    """Item pipeline for the www.123seguro.com spider.

    Bug fix: the class was previously named ``123SeguroComPipeline``, which
    is not a legal Python identifier (it begins with a digit) and made this
    module a SyntaxError on import.  NOTE(review): any ITEM_PIPELINES
    setting referencing the old dotted path must be updated to match.
    """

    def process_item(self, item, spider):
        """Pass every scraped item through unchanged."""
        return item
| {
"content_hash": "c110835f6afdc2583d1c461d7923b49e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7040816326530612,
"repo_name": "hanjihun/Car",
"id": "c1326e4c6df01b3fbda789d02d1948faa9a4d161",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www_123seguro_com/www_123seguro_com/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105671"
}
],
"symlink_target": ""
} |
from mock import Mock, patch
from nose.tools import assert_equal
from pylons import app_globals as g
from alluratest.controller import setup_unit_test
from allura.model.repository import Commit
from forgesvn.model.svn import SVNImplementation
class TestSVNImplementation(object):
    """Unit tests for forgesvn's SVNImplementation.

    All svn client calls go through a mocked ``impl._svn``; filesystem paths
    are rooted at the test tmpdir provided by the app globals.
    """

    def setUp(self):
        setup_unit_test()

    def test_compute_tree_new(self):
        # The same path must work with any combination of leading/trailing
        # slashes.
        self._test_compute_tree_new('/trunk/foo/')
        self._test_compute_tree_new('/trunk/foo')
        self._test_compute_tree_new('trunk/foo/')
        self._test_compute_tree_new('trunk/foo')

    @patch('allura.model.repository.LastCommitDoc.m.update_partial')
    @patch('allura.model.repository.Tree.upsert')
    @patch('allura.model.repository.Tree.query.get')
    def _test_compute_tree_new(self, path, tree_get, tree_upsert, lcd_partial):
        repo = Mock(fs_path=g.tmpdir + '/')
        repo.name = 'code'
        impl = SVNImplementation(repo)
        impl._svn.info2 = Mock()
        impl._svn.info2.return_value = [('foo', Mock())]
        tree_get.return_value = None  # no existing tree
        commit = Commit()
        commit._id = '5057636b9c1040636b81e4b1:6'
        tree_upsert.return_value = (Mock(), True)
        # Return value is not asserted here; we only check the svn call and
        # the last-commit-doc update side effect.
        tree_id = impl.compute_tree_new(commit, path)
        assert_equal(impl._svn.info2.call_args[0]
                     [0], 'file://' + g.tmpdir + '/code/trunk/foo')
        assert lcd_partial.called

    def test_last_commit_ids(self):
        self._test_last_commit_ids('/trunk/foo/')
        self._test_last_commit_ids('/trunk/foo')
        self._test_last_commit_ids('trunk/foo/')
        self._test_last_commit_ids('trunk/foo')

    def _test_last_commit_ids(self, path):
        repo = Mock(fs_path=g.tmpdir + '/')
        repo.name = 'code'
        repo._id = '5057636b9c1040636b81e4b1'
        impl = SVNImplementation(repo)
        impl._svn.info2 = Mock()
        impl._svn.info2.return_value = [('trunk', Mock()), ('foo', Mock())]
        impl._svn.info2.return_value[1][1].last_changed_rev.number = '1'
        commit = Commit()
        commit._id = '5057636b9c1040636b81e4b1:6'
        entries = impl.last_commit_ids(commit, [path])
        # The entry key is the path normalised (slashes stripped), the value
        # the repo id combined with the last changed revision.
        assert_equal(entries, {path.strip('/'): '5057636b9c1040636b81e4b1:1'})
        assert_equal(impl._svn.info2.call_args[0]
                     [0], 'file://' + g.tmpdir + '/code/trunk')

    @patch('forgesvn.model.svn.svn_path_exists')
    def test__path_to_root(self, path_exists):
        repo = Mock(fs_path=g.tmpdir + '/')
        repo.name = 'code'
        repo._id = '5057636b9c1040636b81e4b1'
        impl = SVNImplementation(repo)
        path_exists.return_value = False
        # edge cases
        assert_equal(impl._path_to_root(None), '')
        assert_equal(impl._path_to_root(''), '')
        assert_equal(impl._path_to_root('/some/path/'), '')
        assert_equal(impl._path_to_root('some/path'), '')
        # tags
        assert_equal(impl._path_to_root('/some/path/tags/1.0/some/dir'),
                     'some/path/tags/1.0')
        assert_equal(impl._path_to_root('/some/path/tags/1.0/'),
                     'some/path/tags/1.0')
        assert_equal(impl._path_to_root('/some/path/tags/'), '')
        # branches
        assert_equal(impl._path_to_root('/some/path/branches/b1/dir'),
                     'some/path/branches/b1')
        assert_equal(impl._path_to_root('/some/path/branches/b1/'),
                     'some/path/branches/b1')
        assert_equal(impl._path_to_root('/some/path/branches/'), '')
        # trunk
        assert_equal(impl._path_to_root('/some/path/trunk/some/dir/'),
                     'some/path/trunk')
        assert_equal(impl._path_to_root('/some/path/trunk'), 'some/path/trunk')
        # with fallback to trunk
        path_exists.return_value = True
        assert_equal(impl._path_to_root(''), 'trunk')
        assert_equal(impl._path_to_root('/some/path/'), 'trunk')
        assert_equal(impl._path_to_root('/tags/'), 'trunk')
        assert_equal(impl._path_to_root('/branches/'), 'trunk')
        assert_equal(impl._path_to_root('/tags/1.0'), 'tags/1.0')
        assert_equal(impl._path_to_root('/branches/branch'), 'branches/branch')

    @patch('forgesvn.model.svn.svn_path_exists')
    def test_update_checkout_url(self, svn_path_exists):
        impl = SVNImplementation(Mock())
        opts = impl._repo.app.config.options = {}
        # No trunk anywhere: an invalid value is reset to ''.
        svn_path_exists.side_effect = lambda path: False
        opts['checkout_url'] = 'invalid'
        impl.update_checkout_url()
        assert_equal(opts['checkout_url'], '')
        # Trunk exists: both an invalid and an empty value become 'trunk'.
        svn_path_exists.side_effect = lambda path: path.endswith('trunk')
        opts['checkout_url'] = 'invalid'
        impl.update_checkout_url()
        assert_equal(opts['checkout_url'], 'trunk')
        svn_path_exists.side_effect = lambda path: path.endswith('trunk')
        opts['checkout_url'] = ''
        impl.update_checkout_url()
        assert_equal(opts['checkout_url'], 'trunk')
| {
"content_hash": "3650a45d42514ac123b3bce5d2646017",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 42,
"alnum_prop": 0.5966505246166263,
"repo_name": "heiths/allura",
"id": "216255d1bfd8720272b3fc2378bfdc6fff692b82",
"size": "5826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ForgeSVN/forgesvn/tests/model/test_svnimplementation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "173671"
},
{
"name": "HTML",
"bytes": "751039"
},
{
"name": "JavaScript",
"bytes": "1136845"
},
{
"name": "Makefile",
"bytes": "7788"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4238265"
},
{
"name": "RAML",
"bytes": "26153"
},
{
"name": "Ruby",
"bytes": "7006"
},
{
"name": "Shell",
"bytes": "131827"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
from .rest import * | {
"content_hash": "87887e1e8e0c3a78ead78d82a6f646a3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 19,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "NeillHerbst/bitfinex-python",
"id": "be4ee0f6ccce2e64eb2c86c140166dd1c79fdeea",
"size": "19",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitfinex_api/rest/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17265"
}
],
"symlink_target": ""
} |
"""Directives that mark task and configurable topics for the LSST Science
Pipelines documentation.
"""
__all__ = (
"ConfigurableTopicDirective",
"TaskTopicDirective",
"ConfigTopicDirective",
)
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.errors import SphinxError
from sphinx.util.docutils import switch_source_input
from sphinx.util.logging import getLogger
from ..utils import parse_rst_content
from .crossrefs import format_config_id, format_task_id
from .taskutils import extract_docstring_summary, get_docstring, get_type
class BaseTopicDirective(Directive):
    """Base for topic target directives.

    Subclasses provide ``directive_name``, ``get_type``, and
    ``get_target_id`` to describe the kind of topic (task, configurable,
    or config) they mark.
    """

    _logger = getLogger(__name__)

    has_content = True

    required_arguments = 1

    @property
    def directive_name(self):
        """Name of the concrete directive (e.g. ``lsst-task-topic``)."""
        raise NotImplementedError

    def get_type(self, class_name):
        """Get the topic type."""
        raise NotImplementedError

    def get_target_id(self, class_name):
        """Get the reference ID for this topic directive."""
        raise NotImplementedError

    def run(self):
        """Main entrypoint method.

        Returns
        -------
        new_nodes : `list`
            Nodes to add to the doctree.
        """
        env = self.state.document.settings.env

        try:
            class_name = self.arguments[0]
        except IndexError:
            raise SphinxError(
                "{0} directive requires a class name as an "
                "argument".format(self.directive_name)
            )
        self._logger.debug(
            "%s using class %s", self.directive_name, class_name
        )

        summary_node = self._create_summary_node(class_name)

        target_id = self.get_target_id(class_name)
        target_node = nodes.target("", "", ids=[target_id])

        # Store these task/configurable topic nodes in the environment for
        # later cross referencing.
        if not hasattr(env, "lsst_task_topics"):
            env.lsst_task_topics = {}
        env.lsst_task_topics[target_id] = {
            "docname": env.docname,
            "lineno": self.lineno,
            "target": target_node,
            "summary_node": summary_node,
            "fully_qualified_name": class_name,
            "type": self.get_type(class_name),
        }

        return [target_node]

    def _create_summary_node(self, class_name):
        """Build summary nodes from the directive content, falling back to
        the documented class's docstring when no content was given."""
        if len(self.content) > 0:
            # Try to get the summary content from the directive content
            container_node = nodes.container()
            container_node.document = self.state.document
            content_view = self.content
            with switch_source_input(self.state, content_view):
                self.state.nested_parse(content_view, 0, container_node)
            return container_node.children
        else:
            # Fallback is to get summary sentence from class docstring.
            return self._get_docstring_summary(class_name)

    def _get_docstring_summary(self, class_name):
        """Parse the first sentence of the class docstring into nodes."""
        obj = get_type(class_name)
        summary_text = extract_docstring_summary(get_docstring(obj))
        if summary_text == "":
            summary_text = "No description available."
        summary_text = summary_text.strip() + "\n"
        return parse_rst_content(summary_text, self.state)
class ConfigurableTopicDirective(BaseTopicDirective):
    """``lsst-configurable-topic`` directive marking a Configurable's topic
    page.

    A Configurable is essentially a generalized task: it has a ConfigClass,
    but no run methods.
    """

    directive_name = "lsst-configurable-topic"

    def get_type(self, class_name):
        """Topic type label for Configurables."""
        return "Configurable"

    def get_target_id(self, class_name):
        """Configurables share the task reference-ID namespace."""
        return format_task_id(class_name)
class TaskTopicDirective(BaseTopicDirective):
    """``lsst-task-topic`` directive that labels a Task's topic page."""

    directive_name = "lsst-task-topic"

    def get_type(self, class_name):
        """Classify the task as PipelineTask, CmdLineTask, or plain Task."""
        from lsst.pipe.base import CmdLineTask, PipelineTask

        # Note: get_type() here resolves to the module-level helper, not
        # this method.
        task_class = get_type(class_name)
        if issubclass(task_class, PipelineTask):
            return "PipelineTask"
        if issubclass(task_class, CmdLineTask):
            return "CmdLineTask"
        return "Task"

    def get_target_id(self, class_name):
        return format_task_id(class_name)
class ConfigTopicDirective(BaseTopicDirective):
    """``lsst-config-topic`` directive that labels a Config topic page.

    Configs are lsst.pex.config.config.Config subclasses.
    """

    directive_name = "lsst-config-topic"

    def get_type(self, class_name):
        """Topic type label for Config classes."""
        return "Config"

    def get_target_id(self, class_name):
        """Configs use the config reference-ID namespace."""
        return format_config_id(class_name)
| {
"content_hash": "600604791aaa3a10f1c75be108bb8e6a",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 30.314814814814813,
"alnum_prop": 0.6298106291997556,
"repo_name": "lsst-sqre/sphinxkit",
"id": "e9738499f5271492fa006ddec592674d9ca1c265",
"size": "4911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "documenteer/sphinxext/lssttasks/topics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16831"
}
],
"symlink_target": ""
} |
import os
import re
import requests
# API credentials for each geocoding provider, read once from the
# environment at import time.  A missing variable yields None, which the
# corresponding provider treats as "not configured".
bing_key = os.environ.get('BING_API_KEY')
tomtom_key = os.environ.get('TOMTOM_API_KEY')
here_app_id = os.environ.get('HERE_APP_ID')
here_app_code = os.environ.get('HERE_APP_CODE')
geonames_username = os.environ.get('GEONAMES_USERNAME')
opencage_key = os.environ.get('OPENCAGE_API_KEY')
mapquest_key = os.environ.get('MAPQUEST_API_KEY')
baidu_key = os.environ.get('BAIDU_API_KEY')
baidu_security_key = os.environ.get('BAIDU_SECURITY_KEY')
gaode_key = os.environ.get('GAODE_API_KEY')
w3w_key = os.environ.get('W3W_API_KEY')
mapbox_access_token = os.environ.get('MAPBOX_ACCESS_TOKEN')
google_key = os.environ.get('GOOGLE_API_KEY')
google_client = os.environ.get('GOOGLE_CLIENT')
google_client_secret = os.environ.get('GOOGLE_CLIENT_SECRET')
mapzen_key = os.environ.get('MAPZEN_API_KEY')
tamu_key = os.environ.get('TAMU_API_KEY')
geocodefarm_key = os.environ.get('GEOCODEFARM_API_KEY')
tgos_key = os.environ.get('TGOS_API_KEY')
locationiq_key = os.environ.get('LOCATIONIQ_API_KEY')
class CanadapostKeyLazySingleton(object):
    """Lazily resolves (and then caches) a Canada Post API key.

    The key comes from an explicit ``key`` argument, the
    ``CANADAPOST_API_KEY`` environment variable, or — as a last resort —
    by scraping the public Canada Post postal-code lookup page.
    """

    CANADAPOST_KEY_REGEX = re.compile(r"'(....-....-....-....)';")

    def __init__(self):
        self._key = None

    def __call__(self, **kwargs):
        # Resolve at most once; later calls reuse the cached value.
        if self._key is None:
            self._key = self.retrieve_key(**kwargs)
        return self._key

    @classmethod
    def retrieve_key(cls, **kwargs):
        """Resolve the API key, raising ValueError when none can be found."""
        explicit_key = kwargs.get('key')
        env_key = os.environ.get('CANADAPOST_API_KEY')
        if explicit_key:
            return explicit_key
        if env_key:
            return env_key
        # Fallback: scrape the key out of Canada Post's lookup page.
        try:
            url = 'http://www.canadapost.ca/cpo/mc/personal/postalcode/fpc.jsf'
            timeout = kwargs.get('timeout', 5.0)
            proxies = kwargs.get('proxies', '')
            response = requests.get(url, timeout=timeout, proxies=proxies)
            match = cls.CANADAPOST_KEY_REGEX.search(response.text)
            if match is None:
                raise ValueError('No API Key found')
            return match.group(1)
        except Exception as err:
            raise ValueError('Could not retrieve API Key: %s' % err)


# Shared module-level getter used by the canadapost provider.
canadapost_key_getter = CanadapostKeyLazySingleton()
| {
"content_hash": "5e9f732022ca423448418638a98814c2",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 35.317460317460316,
"alnum_prop": 0.642247191011236,
"repo_name": "DenisCarriere/geocoder",
"id": "6d42c2d9d38e1dce96ee8882ca10dd3d68a114c7",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "415"
},
{
"name": "Python",
"bytes": "283948"
}
],
"symlink_target": ""
} |
from functions import *
| {
"content_hash": "bb13f4dd3aafd9490101b3bd50c7e6f3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.7916666666666666,
"repo_name": "kevink1986/my-first-blog",
"id": "d71d12f98db759126cfc6aab25d8fd9c3137a57b",
"size": "24",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "functions/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8173"
},
{
"name": "HTML",
"bytes": "8653"
},
{
"name": "JavaScript",
"bytes": "6829"
},
{
"name": "Python",
"bytes": "956255"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
import views
# URL routes for the group app: contest/announcement listings plus group
# CRUD views, each keyed by the numeric group id.
# NOTE(review): ``django.conf.urls.patterns`` is the pre-Django-1.8 API;
# this matches the Django version the project imports — confirm before
# upgrading Django.
urlpatterns = patterns('',
    url(r'^viewall_contest/(?P<group_id>\d+)/$', views.get_running_contest, name='viewall_contest'),
    url(r'^viewall_archive/(?P<group_id>\d+)/$', views.get_ended_contest, name='viewall_archive'),
    url(r'^viewall_announce/(?P<group_id>\d+)/$', views.get_all_announce, name='viewall_announce'),
    url(r'^list/$', views.list, name='list'),
    url(r'^detail/(?P<group_id>\d+)/$', views.detail, name='detail'),
    url(r'^new/$', views.new, name='new'),
    url(r'^delete/(?P<group_id>\d+)/$', views.delete, name='delete'),
    url(r'^edit/(?P<group_id>\d+)/$', views.edit, name='edit'),
)
| {
"content_hash": "33f237f7a71e585edc888a52d145429d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 100,
"avg_line_length": 51.5,
"alnum_prop": 0.636615811373093,
"repo_name": "drowsy810301/NTHUOJ_web",
"id": "23a436796bd988898a39862fd70df09d20166492",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "group/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17630"
},
{
"name": "HTML",
"bytes": "118458"
},
{
"name": "JavaScript",
"bytes": "47460"
},
{
"name": "Python",
"bytes": "236617"
}
],
"symlink_target": ""
} |
import os
import gc
import argparse
from time import time
from contextlib import contextmanager
from spec_data import CleanSpectra, write_spectra_file
@contextmanager
def timeit(message):
    """Context manager that optionally prints *message* before the body
    runs, then reports the body's elapsed wall-clock time."""
    if message:
        print(message)
    start = time()
    yield
    elapsed = time() - start
    print(" {0:.2g} sec".format(elapsed))
def process_file(input_file, output_file, Nmax=None,
                 n_components=200, p=2):
    """Denoise the spectra in *input_file* via weighted PCA and write the
    reconstruction to *output_file*.

    Parameters: Nmax caps how many spectra are loaded (None = all),
    n_components is the weighted-PCA rank, p the reconstruction exponent.
    """
    cleaner = CleanSpectra()

    shown_count = 'all' if Nmax is None else Nmax
    with timeit("loading {0} spectra from {1}".format(shown_count,
                                                      input_file)):
        cleaner.load_data(input_file, Nmax)

    with timeit("fitting weighted PCA for {0} components"
                "".format(n_components)):
        cleaner.fit_wpca(n_components=n_components)

    with timeit("computing {0} reconstructed spectra:"
                "".format(cleaner.spectra.shape[0])):
        reconstructed = cleaner.reconstruct(p=p)

    # Drop the (large) cleaner object before writing to keep peak memory
    # down; keep only what the output file needs.
    wavelengths = cleaner.wavelengths.copy()
    del cleaner

    with timeit("writing reconstructed spectra to file"):
        write_spectra_file(output_file,
                           spectra=reconstructed,
                           wavelengths=wavelengths)
def main():
    """Command-line entry point: clean every HDF5 file named on argv."""
    parser = argparse.ArgumentParser(description='Clean spectra files')
    parser.add_argument('filenames', type=str, nargs='+')
    args = parser.parse_args()

    # Validate all inputs up front so we fail before doing any work.
    for filename in args.filenames:
        if not os.path.exists(filename) or not filename.endswith('.hdf5'):
            raise ValueError("{0} is not a valid HDF5 file".format(filename))

    for filename in args.filenames:
        stem, extension = os.path.splitext(filename)
        output_file = stem + "_clean" + extension
        print("\n============================================================")
        print("Cleaning", filename, "->", output_file)
        process_file(input_file=filename,
                     output_file=output_file)
        # Reclaim the large arrays from the previous file before starting
        # the next one.
        gc.collect()


if __name__ == '__main__':
    main()
| {
"content_hash": "4ffec0e7288844dd64a801a08e3caa13",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 29.044117647058822,
"alnum_prop": 0.5893670886075949,
"repo_name": "jakevdp/spec_data",
"id": "88d79d6ce3ba53f11217a371be9fe5ef5d7bc98e",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_spectra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3978"
}
],
"symlink_target": ""
} |
import json


def main():
    """Sum every integer in the JSON document (Advent of Code 2015, day 12).

    Bug fixes / portability: the original used a Python-2-only ``print``
    statement and accumulated into a mutable module global via ``global``;
    this version is Python 2/3 compatible and keeps the accumulator local.
    """
    total = [0]

    def accumulate(s):
        # json calls this for every integer literal; fold it into the sum.
        total[0] += int(s)
        return 0

    with open('../inputs/12.txt') as f:
        json.load(f, parse_int=accumulate)

    print(total[0])


if __name__ == '__main__':
    main()
| {
"content_hash": "d63f0fddb9459e00addf725db539f55e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 39,
"avg_line_length": 13.307692307692308,
"alnum_prop": 0.6184971098265896,
"repo_name": "opello/adventofcode",
"id": "311d5c0f5043c81bbff6792b4ec71d5bcace6280",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2015/python/12-1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35422"
}
],
"symlink_target": ""
} |
from .pcollector import *
from .rpredictor_schedule import *
from .rpredictor import *
from .train import *
| {
"content_hash": "c8d927417a70e8903c7acd26ff6aec6f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 27,
"alnum_prop": 0.7685185185185185,
"repo_name": "vacancy/TensorArtist",
"id": "ae06411a3b10abd123a4cc635e52233dfb89ca23",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/human-preference/libhpref/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497134"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
def get_amino_masses(input_file):
    """Parse 'LETTER MASS' lines into a {letter: mass-string} mapping."""
    masses = {}
    for line in input_file:
        amino, mass = line.strip().split()
        masses[amino] = mass
    return masses
def get_string_mass(string, masses_dict):
    """Total mass of *string*, summing per-residue masses from *masses_dict*."""
    return sum(float(masses_dict[residue]) for residue in string)
def main():
    """Read the amino-acid mass table, then print the mass of a fixed peptide."""
    with open("Amino_masses.txt", "r") as table_file:
        masses = get_amino_masses(table_file)
    peptide = "SKADYEK"
    print(get_string_mass(peptide, masses))


main()
| {
"content_hash": "128e6da21437feeb63cddc56b6e88b40",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 59,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.7058823529411765,
"repo_name": "Daerdemandt/Learning-bioinformatics",
"id": "cc1136a338e3ef3f92796613daa7135b6aaa2cce",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PRTM/Solution.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51233"
}
],
"symlink_target": ""
} |
"""Common settings module."""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
APPEND_SLASH = True
# Django settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "w;ioufpwqofjpwoifwpa09fuq039uq3u4uepoivqnwjdfvlwdv")
INSTALLED_APPS = [
# built in apps
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
# apostello
"apostello.apps.ApostelloConfig",
"api",
"elvanto",
"graphs",
"site_config",
# third party apps
"rest_framework",
"rest_framework.authtoken",
"django_extensions",
"solo",
"django_redis",
"django_q",
"cookielaw",
# auth
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"apostello.middleware.FirstRunRedirect",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"apostello.context_processors.global_settings",
],
"loaders": ["django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader"],
},
}
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATIC_URL = "/static/"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
ROOT_URLCONF = "apostello.urls"
WSGI_APPLICATION = "apostello.wsgi.application"
LANGUAGE_CODE = "en-gb"
TIME_ZONE = os.environ.get("DJANGO_TIME_ZONE", "Europe/London")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# session settings
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
MESSAGE_STORAGE = "django.contrib.messages.storage.fallback.FallbackStorage"
# Cache settings
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "127.0.0.1:6379",
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
}
}
#
Q_CLUSTER = {
"name": "apostello",
"workers": 1,
"recycle": 250,
"timeout": 120,
"compress": True,
"save_limit": 500,
"queue_limit": 500,
"cpu_affinity": 1,
"label": "Django Q",
"django_redis": "default",
}
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
"DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.DjangoModelPermissions"],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
}
# email settings
EMAIL_BACKEND = "apostello.mail.ApostelloEmailBackend"
EMAIL_USE_TLS = True
# social login settings
ACCOUNT_ADAPTER = "apostello.account.ApostelloAccountAdapter"
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_DEFAULT_HTTP_PROTOCOL = os.environ.get("ACCOUNT_DEFAULT_HTTP_PROTOCOL", "https")
WHITELISTED_LOGIN_DOMAINS = os.environ.get("WHITELISTED_LOGIN_DOMAINS", "").split(",")
ACCOUNT_FORMS = {
"login": "apostello.forms.LoginForm",
"signup": "apostello.forms.SignupForm",
"add_email": "apostello.forms.AddEmailForm",
"change_password": "apostello.forms.ChangePasswordForm",
"set_password": "apostello.forms.SetPasswordForm",
"reset_password": "apostello.forms.ResetPasswordForm",
"reset_password_from_key": "apostello.forms.ResetPasswordKeyForm",
}
LOGIN_REDIRECT_URL = "/"
# Elvanto credentials
ELVANTO_KEY = os.environ.get("ELVANTO_KEY", "")
# Onebody credentials - if left blank, no syncing will be done, otherwise, data
# is pulled automatically once a day
# details on how to obtain the api key:
# https://github.com/churchio/onebody/wiki/API
ONEBODY_BASE_URL = os.environ.get("ONEBODY_BASE_URL")
ONEBODY_USER_EMAIL = os.environ.get("ONEBODY_USER_EMAIL")
ONEBODY_API_KEY = os.environ.get("ONEBODY_API_KEY")
ONEBODY_WAIT_TIME = 10
# Sms settings - note that messages over 160 will be charged twice
MAX_NAME_LENGTH = 16
SMS_CHAR_LIMIT = 160 - MAX_NAME_LENGTH + len("{name}")
# Used for nomalising elvanto imports, use twilio to limit sending to
# particular countries:
# https://www.twilio.com/help/faq/voice/what-are-global-permissions-and-why-do-they-exist
COUNTRY_CODE = os.environ.get("COUNTRY_CODE", "44")
NO_ACCESS_WARNING = (
"You do not have access to that page. " "If you believe you are seeing it in error please contact the office"
)
ROLLBAR_ACCESS_TOKEN = os.environ.get("ROLLBAR_ACCESS_TOKEN")
ROLLBAR_ACCESS_TOKEN_CLIENT = os.environ.get("ROLLBAR_ACCESS_TOKEN_CLIENT")
# solo caching:
SOLO_CACHE = "default"
# maximum number of SMS to send to clients from api
# if this is too large it may crash the elm run time
MAX_SMS_N = os.environ.get("MAX_SMS_TO_CLIENT", 5000)
try:
MAX_SMS_N = int(MAX_SMS_N)
except ValueError:
MAX_SMS_N = 5000
| {
"content_hash": "2abdbcf617638c1899b3b11bae3b34d0",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 119,
"avg_line_length": 32.265625,
"alnum_prop": 0.7034705407586763,
"repo_name": "monty5811/apostello",
"id": "f9ff1a2af39e69692583a88581ac8c1e7c9945eb",
"size": "6195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18413"
},
{
"name": "Elm",
"bytes": "484874"
},
{
"name": "HTML",
"bytes": "21141"
},
{
"name": "JavaScript",
"bytes": "31346"
},
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "372217"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) 2015 @ CodeForBirmingham (http://codeforbirmingham.org)
@Author: Marcus Dillavou <marcus.dillavou@codeforbirmingham.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import cbminer
| {
"content_hash": "3fdce050056363146f7acb2d80520d18",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 46.38461538461539,
"alnum_prop": 0.7993366500829188,
"repo_name": "CodeforBirmingham/Birmingham-CoalbolMiner",
"id": "74aa7e7b479b6cf7d711ae2df47e3b22a970a61b",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4409"
},
{
"name": "JavaScript",
"bytes": "649"
},
{
"name": "Python",
"bytes": "42551"
}
],
"symlink_target": ""
} |
"""
Adapted from Cython/Compiler/Visitor.py, see this module for detailed
explanations.
"""
import inspect
miniast = None # avoid circular import AttributeError for sphinx-apidoc
import treepath
class TreeVisitor(object):
    """
    Non-mutating visitor. Subclass and implement visit_MyNode methods.

    Dispatch is by the visited object's class name: ``visit_<ClassName>``,
    falling back through the class's MRO. A user can traverse a foreign
    AST by implementing :py:class:`minivect.miniast.Context.getchildren`.
    """
    want_access_path = False
    def __init__(self, context):
        self.context = context
        # Cache of type -> bound handler, filled lazily by visit().
        self.dispatch_table = {}
        if self.want_access_path:
            self.access_path = []
        else:
            # No bookkeeping needed; dispatch straight to visit().  The
            # extra (parent, attrname, idx) arguments are absorbed by
            # visit()'s *args.
            self._visitchild = self.visit
    def _find_handler(self, obj):
        # To resolve a handler, try the entire class hierarchy of obj.
        cls = type(obj)
        pattern = "visit_%s"
        mro = inspect.getmro(cls)
        for mro_cls in mro:
            handler_method = getattr(self, pattern % mro_cls.__name__, None)
            if handler_method is not None:
                return handler_method
        raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
    def visit(self, obj, *args):
        "Visit a single child."
        try:
            handler_method = self.dispatch_table[type(obj)]
        except KeyError:
            handler_method = self._find_handler(obj)
            self.dispatch_table[type(obj)] = handler_method
        return handler_method(obj)
    def _visitchild(self, child, parent, attrname, idx):
        # Only used when want_access_path is set (see __init__).
        self.access_path.append((parent, attrname, idx))
        result = self.visit(child)
        self.access_path.pop()
        return result
    def visit_childlist(self, child, parent=None, attr=None):
        """Visit a child attribute, which may be a single node or a list."""
        if isinstance(child, list):
            childretval = [self._visitchild(child_node, parent, attr, idx)
                           for idx, child_node in enumerate(child)]
        else:
            childretval = self._visitchild(child, parent, attr, None)
            if isinstance(childretval, list):
                # BUG FIX: the original referenced an undefined name
                # 'node' here, so this error path raised NameError
                # instead of the intended RuntimeError.
                raise RuntimeError(
                    'Cannot insert list here: %s in %r' % (attr, parent))
        return childretval
    def visitchildren(self, parent, attrs=None):
        "Visits the children of the given node."
        if parent is None:
            return None
        if attrs is None:
            attrs = self.context.getchildren(parent)
        result = {}
        for attr in attrs:
            child = getattr(parent, attr)
            if child is not None:
                result[attr] = self.visit_childlist(child, parent, attr)
        return result
    def treepath(self, node, xpath_expr):
        """Iterate over nodes matching an XPath-like expression."""
        return treepath.iterfind(node, xpath_expr)
    def treepath_first(self, node, xpath_expr):
        """Return the first node matching an XPath-like expression."""
        return treepath.find_first(node, xpath_expr)
    def p(self, node):
        """Debug helper: pretty-print the tree rooted at `node`."""
        node.print_tree(self.context)
class VisitorTransform(TreeVisitor):
    """
    Mutating transform. Each attribute is replaced by the result of the
    corresponding visit_MyNode method.
    """
    def visitchildren(self, parent, attrs=None):
        """Visit children and write the returned nodes back onto `parent`.

        A handler may return a list; it is flattened one level and any
        None entries are dropped before being stored.
        """
        result = super(VisitorTransform, self).visitchildren(parent, attrs)
        # FIX: use .items() instead of the Python-2-only .iteritems() so
        # this runs on Python 3 too; on Python 2 the behavior is the same
        # (items() returns a list instead of an iterator).
        for attr, newnode in result.items():
            if not type(newnode) is list:
                setattr(parent, attr, newnode)
            else:
                # Flatten the list one level and remove any None
                newlist = []
                for x in newnode:
                    if x is not None:
                        if type(x) is list:
                            newlist += x
                        else:
                            newlist.append(x)
                setattr(parent, attr, newlist)
        return result
class GenericVisitor(TreeVisitor):
    """Visitor whose default behavior is to recurse into every child."""
    def visit_Node(self, node):
        """Descend into all children, then return `node` unchanged."""
        self.visitchildren(node)
        return node
class GenericTransform(VisitorTransform, GenericVisitor):
    """Generic transform that automatically visits children.

    Combines VisitorTransform's attribute write-back with GenericVisitor's
    default visit_Node recursion.
    """
class MayErrorVisitor(TreeVisitor):
    """
    Walk an AST and determine whether the code generated for it can raise
    exceptions.  The answer accumulates in `may_error`.
    """
    may_error = False
    def visit_Node(self, node):
        # Plain nodes cannot raise by themselves; keep looking below.
        self.visitchildren(node)
    def visit_NodeWrapper(self, node):
        # Wrapped foreign nodes are queried through the context.  Once
        # may_error is True there is no need to ask again.
        if not self.may_error:
            self.may_error = self.context.may_error(node.opaque_node)
    def visit_ForNode(self, node):
        # Only the loop header is inspected (matches original behavior:
        # the loop body is not visited here).
        for section in (node.init, node.condition, node.step):
            self.visit(section)
class PrintTree(TreeVisitor):
"""
Print an AST, see also :py:class:`minivect.miniast.Node.print_tree`.
"""
indent = 0
want_access_path = True
def format_value(self, node):
import miniast
if node.is_temp:
format_value = node.repr_name
elif (isinstance(node, miniast.Variable) or
isinstance(node, miniast.FuncNameNode) or
node.is_funcarg):
format_value = node.name
elif node.is_binop or node.is_unop:
format_value = node.operator
elif node.is_constant:
format_value = node.value
elif node.is_sizeof:
format_value = str(node.type)
else:
return None
return format_value
def format_node(self, node, want_type_info=True):
result = type(node).__name__
format_value = self.format_value(node)
if node.is_expression and want_type_info:
if format_value is not None:
format_value = "%s, type=%s" % (format_value, node.type)
else:
format_value = "type=%s" % (node.type,)
if format_value:
return "%s(%s)" % (result, format_value)
else:
return result
def visit_Node(self, node):
if self.access_path:
parent, attr, idx = self.access_path[-1]
else:
attr = "(root)"
idx = None
prefix = "%s%s" % (self.indent * " ", attr)
if idx is not None:
prefix = "%s[%d]" % (prefix, idx)
print "%s: %s" % (prefix, self.format_node(node))
self.indent += 1
self.visitchildren(node)
self.indent -= 1 | {
"content_hash": "b4642f70c3669cfbddfe542ce045edae",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 81,
"avg_line_length": 30.591133004926107,
"alnum_prop": 0.5758454106280193,
"repo_name": "markflorisson/minivect",
"id": "15eabba1a5b2ef537ca0a8bf0110a52db3cb4a0f",
"size": "6210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minivect/minivisitor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "19490"
},
{
"name": "FORTRAN",
"bytes": "6189"
},
{
"name": "Python",
"bytes": "334834"
}
],
"symlink_target": ""
} |
from JumpScale import j
from .Params import ParamsFactory
# make the 'core' namespace available on the global JumpScale object
j.base.loader.makeAvailable(j, 'core')
# expose a ParamsFactory instance as j.core.params
j.core.params = ParamsFactory()
| {
"content_hash": "f22f85d319b1fb946ba8e1e904fc97ba",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 26,
"alnum_prop": 0.7846153846153846,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "b36a1963defccd7bb0b4596bcdce736825ccd93d",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/params/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
class Unit():
    """Stub handler unit: both methods are intentionally empty placeholders."""
    def __init__(self,request,data):
        # Stub: the request and payload are accepted but not stored.
        pass
    def text(self):
        # Stub: implicitly returns None.
        pass | {
"content_hash": "ae094bcca417d466ebed57594ef6fb42",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 19.2,
"alnum_prop": 0.5208333333333334,
"repo_name": "yubang/urlHander",
"id": "2edafd82abe28af9002b2bcd5e16b9dc2d5bba8f",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/test/unit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Python",
"bytes": "11856"
}
],
"symlink_target": ""
} |
"""This module implements plugin test."""
from __future__ import absolute_import
import os
import types
from swak.config import get_exe_dir
from swak.plugin import iter_plugins, import_plugins_package, TextInput,\
Parser, get_plugins_dir, Output
# from swak.util import test_logconfig
from swak.const import PLUGINDIR_PREFIX
from swak.memorybuffer import MemoryBuffer
# test_logconfig()
def plugin_filter(_dir):
    """Keep only the 'scounter' and 'stdout' plugin directories."""
    allowed = ('scounter', 'stdout')
    return _dir in allowed
def plugin_filter_ext(_dir):
    """Keep only the external 'testfoo' plugin directory."""
    # membership in a one-element list is just an equality test
    return _dir == '{}-testfoo'.format(PLUGINDIR_PREFIX)
def test_plugin_util():
    """Plugin directory resolution for standard and external plugins."""
    # standard plugin dir lives next to the executable
    assert get_plugins_dir(True) == os.path.join(get_exe_dir(), 'stdplugins')
    assert len(list(iter_plugins(True, None, plugin_filter))) > 0
    # external plugin dir
    assert get_plugins_dir(False) == os.path.join(get_exe_dir(), 'plugins')
def test_plugin_import():
    """Import the plugins base package, then two standard plugin modules."""
    pkg = import_plugins_package(True)
    assert isinstance(pkg, types.ModuleType)
    for modpath in ('stdplugins.counter', 'stdplugins.filter'):
        __import__(modpath)
def test_plugin_basic(agent):
    """Test plugins basic features: input, parse, filter, flush."""
    class FooInput(TextInput):
        # fixed input stream: "john 1", "jane 2", "smith 3"
        names = ["john", "jane", "smith"]
        def generate_line(self):
            for i in range(3):
                yield '{} {}'.format(FooInput.names[i], i + 1)
    class FooParser(Parser):
        def parse(self, line):
            name, rank = line.split()
            return dict(name=name, rank=rank)
    parser = FooParser()
    dtinput = FooInput()
    dtinput.set_parser(parser)
    agent.register_plugin("test", dtinput)
    # FIX: renamed from `filter`, which shadowed the builtin of the same name
    def has_j(line):
        return 'j' in line
    dtinput.set_filter_func(has_j)
    agent.simple_process(dtinput)
    agent.flush(True)
    bulks = agent.def_output.bulks
    # only "john 1" and "jane 2" contain a 'j' and survive the filter
    assert len(bulks) == 2
    assert "'name': 'john'" in bulks[0].split('\t')[2]
    assert "'name': 'jane'" in bulks[1].split('\t')[2]
def test_plugin_output():
    """Starting/stopping an output plugin starts/stops its buffer."""
    membuf = MemoryBuffer(None, False)
    plugin = Output(None, membuf)
    # the buffer handed in is the one the plugin exposes
    assert plugin.buffer is membuf
    plugin.start()
    assert plugin.buffer.started
    plugin.stop()
    assert not plugin.buffer.started
| {
"content_hash": "4a69b81569c686f531dc49644bd8629e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 73,
"avg_line_length": 27.1,
"alnum_prop": 0.6478064780647806,
"repo_name": "haje01/swak",
"id": "2b6ad54938edfe9a1da30058a6f1f23ff370701c",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "44"
},
{
"name": "Python",
"bytes": "175737"
}
],
"symlink_target": ""
} |
from time import sleep
from selenium import webdriver
class PyTest2():
    """Log in to a Ranzhi instance with Firefox via Selenium."""
    def test(self):
        """Open the login page, submit credentials, then quit the browser."""
        driver = webdriver.Firefox()
        driver.get("http://172.31.95.220/ranzhi/")
        sleep(3)
        driver.refresh()
        sleep(3)
        # NOTE(review): credentials are hard-coded test-account values
        driver.find_element_by_css_selector('#account').send_keys("admin")
        driver.find_element_by_css_selector('#password').send_keys("123456")
        driver.find_element_by_css_selector('#submit').click()
        sleep(5)
        driver.quit()
if __name__ == "__main__":
    # run the login scenario directly when executed as a script
    PyTest2().test()
| {
"content_hash": "89a7730a573da9504ed3fe3256bccee2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 21.28,
"alnum_prop": 0.5657894736842105,
"repo_name": "lintyleo/seleniumpro",
"id": "d9ef1b0e88b54a20ae4294be36203cc0a656afc6",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HelloPySelenium/pytest2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69706"
},
{
"name": "HTML",
"bytes": "286635"
},
{
"name": "Java",
"bytes": "54726"
},
{
"name": "JavaScript",
"bytes": "1470402"
},
{
"name": "Python",
"bytes": "33159"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from django_openid.registration import RegistrationConsumer
from intranet_account.forms import OpenIDSignupForm
from intranet_account.utils import get_default_redirect
class PinaxConsumer(RegistrationConsumer):
    """OpenID registration consumer that redirects back into the site."""
    def on_registration_complete(self, request):
        # After signup, send the user to the configured default page.
        return HttpResponseRedirect(get_default_redirect(request))
    def show_i_have_logged_you_in(self, request):
        # Redirect instead of rendering django_openid's own page here --
        # presumably replaces its confirmation screen; confirm upstream.
        return HttpResponseRedirect(get_default_redirect(request))
    def get_registration_form_class(self, request):
        # Use the project's own signup form instead of the library default.
        return OpenIDSignupForm
    def render(self, request, template, context=None):
        # TODO: remove this method. this method is re-implemented to fix a
        # http://code.google.com/p/django-openid/issues/detail?id=22
        context = context or {}
        context['base_template'] = self.base_template
        return render_to_response(template, context,
            context_instance=RequestContext(request)) | {
"content_hash": "41187d77efca180ca38e9f9e39120454",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 39.86206896551724,
"alnum_prop": 0.7474048442906575,
"repo_name": "ingenieroariel/pinax",
"id": "ee235e0aa4b665417f37d3f2ab05c306ccef9883",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/intranet_project/apps/intranet_account/openid_consumer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Python",
"bytes": "520245"
}
],
"symlink_target": ""
} |
# Names re-exported as the package's public API.
__all__ = ['MirroredDict', 'RelationalDict', 'ManyToManyDict', 'BidirectionalDict', 'BiDict', 'TwoWayDict']
# Package metadata.
__title__ = 'reflections'
__version__ = '1.0.1'
__author__ = 'Jared Suttles'
__license__ = 'Modified BSD'
__copyright__ = 'Copyright 2013 Jared Suttles'
from .mirroreddict import MirroredDict
from .relationaldict import RelationalDict, ManyToManyDict
from .bidict import BidirectionalDict, BiDict
from .twowaydict import TwoWayDict
| {
"content_hash": "6cc10396cc8ef91748d1ef512c20250f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 107,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.7398190045248869,
"repo_name": "jaredks/reflections",
"id": "43e93f72ef1d7e826d8d8af9309764db32f02fbe",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reflections/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16671"
}
],
"symlink_target": ""
} |
"""Given user GO ids and parent terms, group user GO ids under one parent term.
Given a group of GO ids with one or more higher-level grouping terms, group
each user GO id under the most descriptive parent GO term.
Each GO id may have more than one parent. One of the parent(s) is chosen
to best represent the user GO id's function. The choice of parent is made by
regarding how close the parent GO id is to the bottom of its hierarchy.
The estimation of how close a GO term is to "the bottom" of its GO hierarchy
is estimated using the number of total Go term descendent counts below
that term.
"""
from goatools.gosubdag.plot.gosubdag_plot import GoSubDagPlot
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.grouper.plotobj import PltGroupedGos, PltGroupedGosArgs
from goatools.grouper.colors import GrouperColors
from goatools.grouper.grprobj import Grouper
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
class GrouperPlot(object):
    """Groups the user GO ids under other GO IDs acting as headers for the GO groups."""
    def __init__(self, grprobj):
        # Grouper instance: supplies usrgos, gosubdag, hdrobj and grouping state.
        self.grprobj = grprobj
    def plot_sections(self, fout_dir=".", **kws_usr):
        """Plot groups of GOs which have been placed in sections."""
        # usrgos=None: color/border maps cover all grouped user GOs.
        kws_plt, _ = self._get_kws_plt(None, **kws_usr)
        PltGroupedGos(self).plot_sections(fout_dir, **kws_plt)
    def plot_groups_all(self, fout_dir=".", **kws_pltargs): # Grouper
        """Plot each GO header group in Grouper."""
        # kws: go2color max_gos upper_trigger max_upper
        return PltGroupedGos(self).plot_groups_all(fout_dir, **kws_pltargs)
    def get_pltdotstrs(self, **kws_usr):
        """Plot each GO header group in Grouper."""
        # kws: go2color max_gos upper_trigger max_upper
        return PltGroupedGos(self).get_pltdotstrs(**kws_usr)
    def get_pltdotstr(self, **kws_usr):
        """Plot one GO header group in Grouper."""
        dotstrs = self.get_pltdotstrs(**kws_usr)
        # Expect exactly one group's dot string; fail loudly otherwise.
        assert len(dotstrs) == 1
        return dotstrs[0]
    def plot_groups_unplaced(self, fout_dir=".", **kws_usr):
        """Plot each GO group."""
        # kws: go2color max_gos upper_trigger max_upper
        plotobj = PltGroupedGos(self)
        return plotobj.plot_groups_unplaced(fout_dir, **kws_usr)
    def get_gosubdagplot(self, goids=None, **kws_usr):
        """Plot GO IDs."""
        # Default to all user GO IDs when no explicit subset is given.
        if goids is None:
            goids = self.grprobj.usrgos
        kws_plt, kws_dag = self._get_kws_plt(goids, **kws_usr)
        # Restrict the sub-DAG to the chosen GO IDs while reusing the
        # parent GoSubDag's relationship/count/namedtuple data.
        gosubdag = GoSubDag(
            goids,
            self.grprobj.gosubdag.get_go2obj(goids),
            self.grprobj.gosubdag.relationships,
            rcntobj=self.grprobj.gosubdag.rcntobj,
            go2nt=self.grprobj.gosubdag.go2nt,
            **kws_dag)
        return GoSubDagPlot(gosubdag, **kws_plt)
    def plot_gos(self, fout_img, goids=None, **kws_usr):
        """Plot GO IDs."""
        gosubdagplot = self.get_gosubdagplot(goids, **kws_usr)  # GoSubDagPlot
        gosubdagplot.plt_dag(fout_img)
    def _get_kws_plt(self, usrgos, **kws_usr):
        """Add go2color and go2bordercolor relevant to this grouping into plot."""
        kws_plt = kws_usr.copy()
        kws_dag = {}
        hdrgo = kws_plt.get('hdrgo', None)
        objcolor = GrouperColors(self.grprobj)
        # GO term colors
        if 'go2color' not in kws_usr:
            kws_plt['go2color'] = objcolor.get_go2color_users()
        elif hdrgo is not None:
            # Caller supplied colors: copy before mutating, then force
            # the header GO to the default header color.
            go2color = kws_plt.get('go2color').copy()
            go2color[hdrgo] = PltGroupedGosArgs.hdrgo_dflt_color
            kws_plt['go2color'] = go2color
        # GO term border colors
        if 'go2bordercolor' not in kws_usr:
            kws_plt['go2bordercolor'] = objcolor.get_bordercolor()
        prune = kws_usr.get('prune', None)
        if prune is True and hdrgo is not None:
            kws_dag['dst_srcs_list'] = [(hdrgo, usrgos), (None, set([hdrgo]))]
            kws_plt['parentcnt'] = True
        elif prune:
            # prune may also be an explicit dst_srcs_list; pass it through.
            kws_dag['dst_srcs_list'] = prune
            kws_plt['parentcnt'] = True
        # Group text
        kws_plt['go2txt'] = self.get_go2txt(self.grprobj,
            kws_plt.get('go2color'), kws_plt.get('go2bordercolor'))
        return kws_plt, kws_dag
    @staticmethod
    def get_go2txt(grprobj_cur, grp_go2color, grp_go2bordercolor):
        """Adds section text in all GO terms if not Misc. Adds Misc in terms of interest."""
        goids_main = set(o.id for o in grprobj_cur.gosubdag.go2obj.values())
        hdrobj = grprobj_cur.hdrobj
        # Regroup user GOs together with all DAG GOs so middle terms also
        # receive section text.
        grprobj_all = Grouper("all",
            grprobj_cur.usrgos.union(goids_main), hdrobj, grprobj_cur.gosubdag)
        # Adds section text to all GO terms in plot (misses middle GO terms)
        _secdflt = hdrobj.secdflt
        # GO IDs that are colored or bordered are "of interest".
        _hilight = set(grp_go2color.keys()).union(grp_go2bordercolor)
        ret_go2txt = {}
        # Keep sections text only if GO header, GO user, or not Misc.
        if hdrobj.sections:
            for goid, txt in grprobj_all.get_go2sectiontxt().items():
                if txt == 'broad':
                    continue
                if txt != _secdflt or goid in _hilight:
                    ret_go2txt[goid] = txt
        return ret_go2txt
    def plot_grouped_gos(self, fout_img=None, exclude_hdrs=None, **kws_usr):
        """One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple)."""
        # kws_plt -> go2color go2bordercolor
        kws_plt, kws_dag = self._get_kws_plt(self.grprobj.usrgos, **kws_usr)
        pltgosusr = self.grprobj.usrgos
        if exclude_hdrs is not None:
            pltgosusr = pltgosusr.difference(self.grprobj.get_usrgos_g_hdrgos(exclude_hdrs))
        if fout_img is None:
            fout_img = "{GRP_NAME}.png".format(GRP_NAME=self.grprobj.grpname)
        # Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}'
        if '{NS}' in fout_img:
            go2nt = self.grprobj.gosubdag.get_go2nt(pltgosusr)
            for namespace in ['BP', 'MF', 'CC']:
                pltgos_ns = [go for go in pltgosusr if go2nt[go].NS == namespace]
                if pltgos_ns:
                    png = fout_img.format(NS=namespace)
                    self._plot_grouped_gos(png, pltgos_ns, kws_plt, kws_dag)
        # Plot all user GO IDs into a single plot, regardless of their namespace
        else:
            self._plot_grouped_gos(fout_img, pltgosusr, kws_plt, kws_dag)
    def _plot_grouped_gos(self, fout_img, pltgosusr, kws_plt, kws_dag):
        # Build a sub-DAG restricted to the plotted GO IDs and render it.
        gosubdag_plt = GoSubDag(
            pltgosusr,
            self.grprobj.gosubdag.get_go2obj(pltgosusr),
            self.grprobj.gosubdag.relationships,
            rcntobj=self.grprobj.gosubdag.rcntobj,
            go2nt=self.grprobj.gosubdag.go2nt,
            **kws_dag)
        godagplot = GoSubDagPlot(gosubdag_plt, **kws_plt)
        godagplot.plt_dag(fout_img)
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| {
"content_hash": "d3c70a4df2cbca66c80ec0fc673b6c3d",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 100,
"avg_line_length": 45.30573248407644,
"alnum_prop": 0.6184451005201743,
"repo_name": "tanghaibao/goatools",
"id": "6705019149ef26a195071d328a334f89611c64df",
"size": "7113",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "goatools/grouper/grprplt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "316670"
},
{
"name": "Makefile",
"bytes": "25213"
},
{
"name": "Python",
"bytes": "146769147"
},
{
"name": "Shell",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import urllib2
import sys
import os
import time
'''Download "Entire Issue" of the Congressional Record for the period May 1 - 31 2014.'''
month = 5  # set month to May

# make an archive/ directory if it doesn't already exist
archive_dir = os.path.join(os.getcwd(), "archive")
if not os.path.exists(archive_dir):
    os.mkdir(archive_dir)

# try downloading the Congressional Record for the start of May 2014
# (the full month would be range(1, 32); this range is deliberately
# truncated to days 1-5, matching the original script)
for day in range(1, 6):
    # construct the URL from which we will try to retrieve the record
    fileName = "CREC-2014-%02d-%02d.pdf" % (month, day)
    fileURL = "http://beta.congress.gov/crec/2014/%02d/%02d/%s" % (month, day, fileName)
    localFile = os.path.join(archive_dir, fileName)
    # use the urllib2 module to fetch the record
    resp = urllib2.urlopen(fileURL)
    try:
        # write the record (PDF file) to a local file
        with open(localFile, "wb") as fout:
            fout.write(resp.read())
    finally:
        # BUG FIX: the response was never closed, leaking the socket;
        # urllib2 responses are not context managers on Python 2.
        resp.close()
    print("Downloaded file %s." % fileURL)
    # inject interval between consecutive requests
    time.sleep(5)
| {
"content_hash": "91159137d8152dfa401ac4647dd70acc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 89,
"avg_line_length": 30.575757575757574,
"alnum_prop": 0.6788899900891973,
"repo_name": "jasonjensen/Montreal-Python-Web",
"id": "b8acddcb728bde73440dfb38441b0f888f6e959c",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day2/getCongressBreaks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "284670"
},
{
"name": "Python",
"bytes": "23611"
}
],
"symlink_target": ""
} |
"""
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [-1,1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
'laggauss', 'lagweight']
# Convenience alias: trim small trailing coefficients from a Laguerre
# series (see numpy.polynomial.polyutils.trimcoef).
lagtrim = pu.trimcoef
def poly2lag(pol):
    """
    poly2lag(pol)

    Convert a polynomial to a Laguerre series.

    Convert an array of polynomial coefficients in the "standard" basis,
    ordered from lowest degree to highest, into the coefficients of the
    equivalent Laguerre series, also ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Laguerre
        series.

    See Also
    --------
    lag2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import poly2lag
    >>> poly2lag(np.arange(4))
    array([ 23., -63.,  58., -18.])
    """
    [pol] = pu.as_series([pol])
    # Horner's scheme in the Laguerre basis: fold coefficients in from
    # the highest degree down, multiplying by x via lagmulx at each step.
    res = 0
    for coef in pol[::-1]:
        res = lagadd(lagmulx(res), coef)
    return res
def lag2poly(c):
    """
    Convert a Laguerre series to a polynomial.
    Convert an array representing the coefficients of a Laguerre series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.
    Parameters
    ----------
    c : array_like
        1-D array containing the Laguerre series coefficients, ordered
        from lowest order term to highest.
    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.
    See Also
    --------
    poly2lag
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy.polynomial.laguerre import lag2poly
    >>> lag2poly([ 23., -63., 58., -18.])
    array([ 0., 1., 2., 3.])
    """
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        # A degree-0 series: L_0 is the constant 1, the coefficient carries over.
        return c
    else:
        # Backward (Clenshaw-style) recurrence: fold c1 down one degree per
        # step using the Laguerre three-term relation (cf. lagmulx's notes).
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], (c1*(i - 1))/i)
            c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
        return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These are constant arrays of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre series default domain.
lagdomain = np.array([0, 1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl):
    """
    Laguerre series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Laguerre series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagline, lagval
    >>> lagval(0, lagline(3, 2))
    3.0
    >>> lagval(1, lagline(3, 2))
    5.0
    """
    # In the Laguerre basis x = L_0(x) - L_1(x), so off + scl*x has
    # coefficients [off + scl, -scl]; a horizontal line is degree 0.
    if scl == 0:
        return np.array([off])
    return np.array([off + scl, -scl])
def lagfromroots(roots):
    """
    Generate a Laguerre series with given roots.

    Return the coefficients, in Laguerre form, of the monic-product
    polynomial ``p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n)`` where
    the ``r_n`` are the values in `roots`.  A root of multiplicity n
    must appear n times; order does not matter.  The coefficient of the
    last term is not generally 1.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients; complex if any root is complex.

    See Also
    --------
    polyfromroots, legfromroots, chebfromroots, hermfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfromroots, lagval
    >>> coef = lagfromroots((-1, 0, 1))
    >>> lagval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    """
    # No roots: the empty product is the constant polynomial 1.
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # One linear factor (x - r) per root, then multiply pairwise so the
    # factor degrees stay balanced as the product is reduced.
    factors = [lagline(-r, 1) for r in roots]
    while len(factors) > 1:
        half, odd = divmod(len(factors), 2)
        paired = [lagmul(factors[i], factors[i + half]) for i in range(half)]
        if odd:
            paired[0] = lagmul(paired[0], factors[-1])
        factors = paired
    return factors[0]
def lagadd(c1, c2):
    """
    Add one Laguerre series to another.

    Return the sum of the two Laguerre series `c1` + `c2`.  Arguments
    are coefficient sequences from lowest order term to highest, e.g.
    [1,2,3] represents ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Laguerre series of their sum.

    See Also
    --------
    lagsub, lagmul, lagdiv, lagpow

    Notes
    -----
    Because the basis is fixed, the sum of two Laguerre series is
    computed component-wise, just as for "standard" polynomials.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagadd
    >>> lagadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])
    """
    # Standardize both inputs, then add the shorter into the longer
    # in place (ties go to c2, matching the original branch order).
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) >= len(c1):
        c2[:c1.size] += c1
        summed = c2
    else:
        c1[:c2.size] += c2
        summed = c1
    return pu.trimseq(summed)
def lagsub(c1, c2):
    """
    Subtract one Laguerre series from another.

    Returns the component-wise difference `c1` - `c2` of two sequences of
    Laguerre coefficients ordered from lowest to highest degree, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Laguerre series coefficients representing their difference.

    See Also
    --------
    lagadd, lagmul, lagdiv, lagpow

    Notes
    -----
    The difference of two Laguerre series is again a Laguerre series (no
    reprojection onto the basis is needed), so subtraction is simply
    component-wise.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagsub
    >>> lagsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])
    """
    # as_series returns trimmed copies of both inputs.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        return pu.trimseq(c1)
    # c2 is at least as long: negate it and fold c1 into the low terms.
    diff = -c2
    diff[:c1.size] += c1
    return pu.trimseq(diff)
def lagmulx(c):
    """Multiply a Laguerre series by x.

    Multiply the Laguerre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    Uses the recursion relationship for Laguerre polynomials in the form

    .. math::

        xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagmulx
    >>> lagmulx([1, 2, 3])
    array([ -1.,  -1.,  11.,  -9.])
    """
    [c] = pu.as_series([c])
    # x * 0 == 0: the zero series maps to itself.
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.zeros(len(c) + 1, dtype=c.dtype)
    # Degree-0 term: x*L_0 = L_0 - L_1.
    out[0] = c[0]
    out[1] = -c[0]
    for k, ck in enumerate(c[1:], 1):
        # Distribute ck*x*L_k over degrees k+1, k and k-1 per the
        # recurrence above.
        out[k + 1] = -ck*(k + 1)
        out[k] += ck*(2*k + 1)
        out[k - 1] -= ck*k
    return out
def lagmul(c1, c2):
    """
    Multiply one Laguerre series by another.

    The arguments are sequences of coefficients, from lowest order "term"
    to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Laguerre series coefficients representing their product.

    See Also
    --------
    lagadd, lagsub, lagdiv, lagpow

    Notes
    -----
    The (polynomial) product of two Laguerre series generally contains
    terms outside the Laguerre basis, so the product is "reprojected"
    onto said basis, which may produce "unintuitive" (but correct)
    coefficients.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagmul
    >>> lagmul([1, 2, 3], [0, 1, 2])
    array([  8., -13.,  38., -51.,  36.])
    """
    [c1, c2] = pu.as_series([c1, c2])
    # Clenshaw-style evaluation: run the recursion over the shorter
    # series (coef) with the longer one (xs) playing the role of "x".
    if len(c1) > len(c2):
        coef, xs = c2, c1
    else:
        coef, xs = c1, c2

    if len(coef) == 1:
        c0, c1 = coef[0]*xs, 0
    elif len(coef) == 2:
        c0, c1 = coef[0]*xs, coef[1]*xs
    else:
        nd = len(coef)
        c0 = coef[-2]*xs
        c1 = coef[-1]*xs
        for k in range(3, len(coef) + 1):
            tmp = c0
            nd = nd - 1
            c0 = lagsub(coef[-k]*xs, (c1*(nd - 1))/nd)
            c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
    # The closing step applies L_1(x) = 1 - x in series form.
    return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
    """
    Divide one Laguerre series by another.

    Returns the quotient-with-remainder of two Laguerre series
    ``c1 / c2``. The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Laguerre series coefficients representing the quotient and
        remainder.

    See Also
    --------
    lagadd, lagsub, lagmul, lagpow

    Notes
    -----
    Quotient and remainder of a polynomial division generally contain
    terms outside the Laguerre basis, so the results are "reprojected"
    onto said basis and may look "unintuitive" (but are correct).

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagdiv
    >>> lagdiv([  8., -13.,  38., -51.,  36.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    """
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    n1, n2 = len(c1), len(c2)
    if n1 < n2:
        # Divisor degree exceeds dividend degree: quotient is zero.
        return c1[:1]*0, c1
    if n2 == 1:
        # Division by a constant is a plain scaling.
        return c1/c2[-1], c1[:1]*0

    # Long division from the highest quotient coefficient down.
    deg = n1 - n2
    quo = np.empty(deg + 1, dtype=c1.dtype)
    rem = c1
    for k in range(deg, -1, -1):
        # Divisor shifted up by k degrees, expressed in the Laguerre basis.
        shifted = lagmul([0]*k + [1], c2)
        factor = rem[-1]/shifted[-1]
        rem = rem[:-1] - factor*shifted[:-1]
        quo[k] = factor
    return quo, pu.trimseq(rem)
def lagpow(c, pow, maxpower=16):
    """Raise a Laguerre series to a power.

    Returns the Laguerre series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Laguerre series of power.

    See Also
    --------
    lagadd, lagsub, lagmul, lagdiv

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagpow
    >>> lagpow([1, 2, 3], 2)
    array([ 14., -16.,  56., -72.,  54.])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    elif maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    elif power == 0:
        return np.array([1], dtype=c.dtype)
    elif power == 1:
        return c
    # Binary (square-and-multiply) exponentiation: O(log power) series
    # multiplications instead of the O(power) repeated-multiply loop.
    prd = None
    base = c
    n = power
    while n > 0:
        if n & 1:
            # Fold the current power-of-two factor into the product.
            prd = base if prd is None else lagmul(prd, base)
        n >>= 1
        if n:
            base = lagmul(base, base)
    return prd
def lagder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Laguerre series.

    Returns the Laguerre series coefficients `c` differentiated `m` times
    along `axis`. At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
    while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
    2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.

    Parameters
    ----------
    c : array_like
        Array of Laguerre series coefficients. If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`. The end result is
        multiplication by ``scl**m``. This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Laguerre series of the derivative.

    See Also
    --------
    lagint

    Notes
    -----
    In general, the result of differentiating a Laguerre series does not
    resemble the same operation on a power series. Thus the result of this
    function may be "unintuitive," albeit correct; see Examples section
    below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagder
    >>> lagder([ 1.,  1.,  1., -3.])
    array([ 1.,  2.,  3.])
    >>> lagder([ 1.,  0.,  0., -4.,  3.], m=2)
    array([ 1.,  2.,  3.])
    """
    # Always copy: c is mutated in place below.
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote bool/integer coefficients so scaling stays exact.
        c = c.astype(np.double)
    # Validate that m and axis are (convertible to) integers.
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    # Bring the differentiation axis to the front so c[j] indexes degree j.
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # Differentiating more times than the degree yields the zero series.
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # Reduce from the highest degree down using the recurrence
            # L_j'(x) = L_{j-1}'(x) - L_{j-1}(x): each c[j] contributes
            # -c[j] to the derivative's L_{j-1} coefficient and folds
            # into c[j-1] for further reduction.
            for j in range(n, 1, -1):
                der[j - 1] = -c[j]
                c[j - 1] += c[j]
            der[0] = -c[1]
            c = der
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Laguerre series.

    Returns the Laguerre series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable. ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.) The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
    represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
    2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Laguerre series coefficients. If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s). The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
        default), all constants are set to zero. If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Laguerre series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    lagder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note? Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`. Then
    .. math::`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set. Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagint
    >>> lagint([1,2,3])
    array([ 1.,  1.,  1., -3.])
    >>> lagint([1,2,3], m=2)
    array([ 1.,  0.,  0., -4.,  3.])
    >>> lagint([1,2,3], k=1)
    array([ 2.,  1.,  1., -3.])
    >>> lagint([1,2,3], lbnd=-1)
    array([ 11.5,   1. ,   1. ,  -3. ])
    >>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
    array([ 11.16666667,  -5.        ,  -3.        ,   2.        ])
    """
    # Always copy: c is mutated in place below.
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote bool/integer coefficients so scaling stays exact.
        c = c.astype(np.double)
    if not np.iterable(k):
        # Allow a bare scalar constant for the m == 1 case.
        k = [k]
    # Validate that m and axis are (convertible to) integers.
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    # Bring the integration axis to the front so c[j] indexes degree j.
    c = np.rollaxis(c, iaxis)
    # Pad the constants with zeros up to the integration order.
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integral of the zero series: only the constant is added.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # The antiderivative of L_j is L_j - L_{j+1} (up to a
            # constant), so each c[j] contributes to the degree-j and
            # degree-(j+1) slots.
            tmp[0] = c[0]
            tmp[1] = -c[0]
            for j in range(1, n):
                tmp[j] += c[j]
                tmp[j + 1] = -c[j]
            # Shift by a constant so the antiderivative equals k[i] at lbnd.
            tmp[0] += k[i] - lagval(lbnd, tmp)
            c = tmp
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def lagval(x, c, tensor=True):
    """
    Evaluate a Laguerre series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`.
    If `c` is multidimensional, then the shape of the result depends on
    the value of `tensor`: if `tensor` is true the shape will be
    ``c.shape[1:] + x.shape``; if false it will be ``c.shape[1:]``.

    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`, so every column of
        coefficients in `c` is evaluated for every element of `x`. If
        False, `x` is broadcast over the columns of `c` for the
        evaluation. The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    lagval2d, laggrid2d, lagval3d, laggrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagval
    >>> coef = [1,2,3]
    >>> lagval(1, coef)
    -0.5
    >>> lagval([[1,2],[3,4]], coef)
    array([[-0.5, -4. ],
           [-4.5, -2. ]])
    """
    # asarray + atleast_1d replaces np.array(c, ndmin=1, copy=0): the
    # copy=False semantics changed in NumPy 2.0, which now raises when a
    # copy is required (e.g. for list input) instead of copying.
    c = np.atleast_1d(np.asarray(c))
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote bool/integer coefficients so the divisions below are exact.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append singleton axes so c broadcasts against every element of x.
        c = c.reshape(c.shape + (1,)*x.ndim)

    # Clenshaw recursion built on the Laguerre three-term recurrence.
    if len(c) == 1:
        c0 = c[0]
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]
        c1 = c[1]
    else:
        nd = len(c)
        c0 = c[-2]
        c1 = c[-1]
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = c[-i] - (c1*(nd - 1))/nd
            c1 = tmp + (c1*((2*nd - 1) - x))/nd
    # L_1(x) = 1 - x closes the recursion.
    return c0 + c1*(1 - x)
def lagval2d(x, y, c):
    """
    Evaluate a 2-D Laguerre series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    See Also
    --------
    lagval, laggrid2d, lagval3d, laggrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        # Stacking x and y also enforces that their shapes agree.  The
        # original np.array((x, y), copy=0) broke on NumPy 2.0, where
        # copy=False raises whenever a copy is required (always, for a
        # tuple input), sending valid input into the except branch.
        x, y = np.array((x, y))
    except Exception:
        raise ValueError('x, y are incompatible')

    # Evaluate one variable at a time; the second call broadcasts over
    # the partial result instead of building a tensor product.
    c = lagval(x, c)
    c = lagval(y, c, tensor=False)
    return c
def laggrid2d(x, y, c):
    """
    Evaluate a 2-D Laguerre series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)

    where the points `(a, b)` consist of all pairs formed by taking `a`
    from `x` and `b` from `y`. The resulting points form a grid with `x`
    in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be
    ``c.shape[2:] + x.shape + y.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Chebyshev series at points in
        the Cartesian product of `x` and `y`.

    See Also
    --------
    lagval, lagval2d, lagval3d, laggrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Two tensor-mode evaluations: the first consumes the x axis of c,
    # the second consumes the y axis and appends y's shape to the result.
    return lagval(y, lagval(x, c))
def lagval3d(x, y, z, c):
    """
    Evaluate a 3-D Laguerre series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be
    ``c.shape[3:] + x.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than 3 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimension polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    lagval, lagval2d, laggrid2d, laggrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        # Stacking also enforces that the three shapes agree.  The
        # original np.array((x, y, z), copy=0) broke on NumPy 2.0, where
        # copy=False raises whenever a copy is required (always, for a
        # tuple input), sending valid input into the except branch.
        x, y, z = np.array((x, y, z))
    except Exception:
        raise ValueError('x, y, z are incompatible')

    # Evaluate one variable at a time; the later calls broadcast over
    # the partial result instead of building a tensor product.
    c = lagval(x, c)
    c = lagval(y, c, tensor=False)
    c = lagval(z, c, tensor=False)
    return c
def laggrid3d(x, y, z, c):
    """
    Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be
    ``c.shape[3:] + x.shape + y.shape + z.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    lagval, lagval2d, laggrid2d, lagval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Three tensor-mode evaluations: each consumes one coefficient axis
    # and appends the shape of the corresponding sample-point array.
    return lagval(z, lagval(y, lagval(x, c)))
def lagvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = L_i(x)

    where ``0 <= i <= deg``. The leading indices of `V` index the elements
    of `x` and the last index is the degree of the Laguerre polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is
    the array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
    ``lagval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of Laguerre series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
        corresponding Laguerre polynomial. The dtype will be the same as
        the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagvander
    >>> x = np.array([0, 1, 2])
    >>> lagvander(x, 3)
    array([[ 1.        ,  1.        ,  1.        ,  1.        ],
           [ 1.        ,  0.        , -0.5       , -0.66666667],
           [ 1.        , -1.        , -1.        , -0.33333333]])
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # atleast_1d + "+ 0.0" promotes to at least 1-D float/complex.  This
    # replaces np.array(x, copy=0, ndmin=1), whose copy=False semantics
    # changed in NumPy 2.0 (it now raises when a copy is required, e.g.
    # for list or scalar input, instead of copying).
    x = np.atleast_1d(np.asarray(x)) + 0.0
    dims = (ideg + 1,) + x.shape
    v = np.empty(dims, dtype=x.dtype)
    # L_0(x) = 1, with the right shape and dtype.
    v[0] = x*0 + 1
    if ideg > 0:
        # L_1(x) = 1 - x; higher degrees via the three-term recurrence.
        v[1] = 1 - x
        for i in range(2, ideg + 1):
            v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
    # Move the degree axis to the end.
    return np.rollaxis(v, 0, v.ndim)
def lagvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points `(x, y)` and the last index encodes
    the degrees of the Laguerre polynomials.

    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Laguerre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    lagvander, lagvander3d. lagval2d, lagval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # Both entries must round-trip through int() and be non-negative;
    # the length check also rejects degree lists of the wrong size.
    if len(ideg) != 2 or not all(di == d and di >= 0
                                 for di, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg

    # Stacking promotes to a common float/complex dtype and enforces
    # matching shapes.  The original used copy=0, which raises on
    # NumPy 2.0 whenever a copy is required (always, for a tuple input).
    x, y = np.array((x, y)) + 0.0
    vx = lagvander(x, degx)
    vy = lagvander(y, degy)
    # Outer product over the two degree axes, then flatten them into one.
    v = vx[..., None]*vy[..., None,:]
    return v.reshape(v.shape[:-2] + (-1,))
def lagvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The
    leading indices of `V` index the points `(x, y, z)` and the last index
    encodes the degrees of the Laguerre polynomials.

    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Laguerre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    lagvander, lagvander3d. lagval2d, lagval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # All three entries must round-trip through int() and be
    # non-negative; the length check rejects wrong-sized degree lists.
    if len(ideg) != 3 or not all(di == d and di >= 0
                                 for di, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg

    # Stacking promotes to a common float/complex dtype and enforces
    # matching shapes.  The original used copy=0, which raises on
    # NumPy 2.0 whenever a copy is required (always, for a tuple input).
    x, y, z = np.array((x, y, z)) + 0.0
    vx = lagvander(x, degx)
    vy = lagvander(y, degy)
    vz = lagvander(z, degz)
    # Outer product over the three degree axes, then flatten them into one.
    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-3] + (-1,))
def lagfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Laguerre series to data.

    Return the coefficients of a Laguerre series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients are 1-D; if `y` is 2-D one fit is
    done per column of `y` and the coefficients are stored in the matching
    columns of a 2-D return. The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets sharing the
        same x-coordinates can be fitted at once by passing a 2-D array
        with one data set per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. A single integer includes
        all terms up to and including the `deg`'th; a list of integers
        (NumPy >= 1.11.0) selects exactly the listed terms.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        `rcond` relative to the largest singular value are ignored. The
        default is ``len(x)*eps``, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        When False (default), return just the coefficients; when True,
        also return diagnostic information from the singular value
        decomposition.
    w : array_like, shape (M,), optional
        Weights. If not None, the contribution of each point
        ``(x[i], y[i])`` to the fit is weighted by ``w[i]``. Ideally the
        weights are chosen so that the errors of the products
        ``w[i]*y[i]`` all have the same variance. Default is None.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Laguerre coefficients ordered from low to high. For 2-D `y`, the
        coefficients for the data in column k of `y` are in column `k`.
    [residuals, rank, singular_values, rcond] : list
        Returned only when ``full == True``; see `numpy.linalg.lstsq` for
        details.

    Warns
    -----
    RankWarning
        Raised when the coefficient matrix is rank deficient, unless
        ``full == True``.

    See Also
    --------
    chebfit, legfit, polyfit, hermfit, hermefit
    lagval : Evaluates a Laguerre series.
    lagvander : pseudo Vandermonde matrix of Laguerre series.
    lagweight : Laguerre weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2

    by solving the (typically overdetermined) system ``V(x) * c = w * y``
    with the singular value decomposition of the weighted pseudo
    Vandermonde matrix `V`. Fits using Laguerre series are probably most
    useful when the data can be approximated by ``sqrt(w(x)) * p(x)``,
    where ``w(x)`` is the Laguerre weight (available as `lagweight`).

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfit, lagval
    >>> x = np.linspace(0, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = lagval(x, [1, 2, 3]) + err
    >>> lagfit(x, y, 2)
    array([ 0.96971004,  2.00193749,  3.00288744])
    """
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    # Validate the arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # Build the pseudo-Vandermonde matrix, keeping only the requested
    # columns when specific terms were asked for.
    if deg.ndim == 0:
        lmax = deg
        order = lmax + 1
        van = lagvander(x, lmax)
    else:
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = lagvander(x, lmax)[:, deg]

    # Set up the least squares system in transposed form.
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # Apply the weights without in-place ops so the originals survive.
        lhs = lhs * w
        rhs = rhs * w

    if rcond is None:
        # Default singular value cutoff scales with the sample count.
        rcond = len(x)*np.finfo(x.dtype).eps

    # Scale the design-matrix columns to unit Euclidean norm to improve
    # the conditioning of the solve.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # Solve, then undo the column scaling on the coefficients.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # Scatter fitted coefficients into their selected positions, zeroing
    # the terms that were excluded from the fit.
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax + 1, dtype=c.dtype)
        cc[deg] = c
        c = cc

    # Warn on rank reduction unless full diagnostics were requested.
    if rank != order and not full:
        warnings.warn("The fit may be poorly conditioned",
                      pu.RankWarning, stacklevel=2)

    if full:
        return c, [resids, rank, s, rcond]
    return c
def lagcompanion(c):
    """
    Return the companion matrix of c.

    The usual companion matrix of the Laguerre polynomials is already
    symmetric when `c` is a basis Laguerre polynomial, so no scaling is
    applied.

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Companion matrix of dimensions (deg, deg).

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree-one series: the 1x1 companion matrix holds the lone root.
        return np.array([[1 + c[0]/c[1]]])
    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Fill the three diagonals of the tridiagonal recurrence matrix using
    # explicit integer indexing: main diagonal 2k+1, sub/super diagonals -k.
    diag_idx = np.arange(n)
    off_idx = np.arange(n - 1)
    off_vals = -np.arange(1, n)
    mat[diag_idx, diag_idx] = 2.*np.arange(n) + 1.
    mat[off_idx, off_idx + 1] = off_vals
    mat[off_idx + 1, off_idx] = off_vals
    # Fold the series coefficients into the last column.
    mat[:, -1] += (c[:-1]/c[-1])*n
    return mat
def lagroots(c):
    """
    Compute the roots of a Laguerre series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * L_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, chebroots, hermroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
    errors due to the numerical instability of the series for such
    values. Roots with multiplicity greater than 1 will also show larger
    errors as the value of the series near such points is relatively
    insensitive to errors in the roots. Isolated roots near the origin can
    be improved by a few iterations of Newton's method.

    The Laguerre series basis polynomials aren't powers of `x` so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
    >>> coef = lagfromroots([0, 1, 2])
    >>> coef
    array([ 2., -8., 12., -6.])
    >>> lagroots(coef)
    array([ -4.44089210e-16,   1.00000000e+00,   2.00000000e+00])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    num_coef = len(c)
    # A constant (or empty) series has no roots at all.
    if num_coef <= 1:
        return np.array([], dtype=c.dtype)
    # Degree one: solve c[0]*L_0(x) + c[1]*L_1(x) = 0 directly,
    # using L_0(x) = 1 and L_1(x) = 1 - x.
    if num_coef == 2:
        return np.array([1 + c[0]/c[1]])
    # General case: eigenvalues of the companion matrix, sorted ascending.
    roots = la.eigvals(lagcompanion(c))
    roots.sort()
    return roots
def laggauss(deg):
    """
    Gauss-Laguerre quadrature.

    Computes the sample points and weights for Gauss-Laguerre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Raises
    ------
    ValueError
        If `deg` is not a positive integer (integral floats such as
        ``3.0`` are accepted).

    Notes
    -----
    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # The check requires deg >= 1, so the message must say "positive".
        raise ValueError("deg must be a positive integer")
    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    # Use the validated int `ideg` (not `deg`) so that an integral float
    # argument, e.g. 3.0, does not break the list repetition below.
    c = np.array([0]*ideg + [1])
    m = lagcompanion(c)
    x = la.eigvalsh(m)
    # improve roots by one application of Newton's method
    dy = lagval(x, c)
    df = lagval(x, lagder(c))
    x -= dy/df
    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = lagval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)
    # scale w to get the right value, 1 in this case (integral of exp(-x))
    w /= w.sum()
    return x, w
def lagweight(x):
    """Weight function of the Laguerre polynomials.

    The weight function is :math:`exp(-x)` and the interval of integration
    is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not
    normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    return np.exp(-x)
#
# Laguerre series class
#
class Laguerre(ABCPolyBase):
    """A Laguerre series class.
    The Laguerre class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.
    Parameters
    ----------
    coef : array_like
        Laguerre coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [0, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [0, 1].
        .. versionadded:: 1.6.0
    """
    # Virtual Functions: ABCPolyBase routes all arithmetic, evaluation,
    # calculus, fitting and root-finding through these module-level
    # lag* routines, wrapped as staticmethods.
    _add = staticmethod(lagadd)
    _sub = staticmethod(lagsub)
    _mul = staticmethod(lagmul)
    _div = staticmethod(lagdiv)
    _pow = staticmethod(lagpow)
    _val = staticmethod(lagval)
    _int = staticmethod(lagint)
    _der = staticmethod(lagder)
    _fit = staticmethod(lagfit)
    _line = staticmethod(lagline)
    _roots = staticmethod(lagroots)
    _fromroots = staticmethod(lagfromroots)
    # Virtual properties
    nickname = 'lag'
    # Both domain and window default to lagdomain, so the default
    # domain -> window mapping is the identity.
    domain = np.array(lagdomain)
    window = np.array(lagdomain)
| {
"content_hash": "848d1613802d106a0b7a6d0643b8c7a9",
"timestamp": "",
"source": "github",
"line_count": 1799,
"max_line_length": 79,
"avg_line_length": 31.223457476375764,
"alnum_prop": 0.5924231364939203,
"repo_name": "bringingheavendown/numpy",
"id": "387d986fab606eca6efbe216c391dc4a05f63b81",
"size": "56171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/polynomial/laguerre.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8111609"
},
{
"name": "C++",
"bytes": "165060"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6660670"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``volume.caps.z.show`` property."""

    def __init__(self, plotly_name="show", parent_name="volume.caps.z", **kwargs):
        # Default the edit type to "calc" unless the caller supplied one,
        # then forward everything to the boolean base validator.
        kwargs.setdefault("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "a54091ad2a5bd2211cec2268ca3c6796",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 35.90909090909091,
"alnum_prop": 0.6151898734177215,
"repo_name": "plotly/plotly.py",
"id": "c99ded1cc672a4bc9863562873531fe84392a0ff",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/volume/caps/z/_show.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import tkinter
import tkinter.filedialog
import tkinter.messagebox
from functools import partial
from tkinter import ttk
from odk_tools.gui.wrappers import generate_images, generate_xform, \
validate_xform, generate_editions
from odk_tools.gui import preferences
class ODKToolsGui:
    """Tkinter GUI that wires together the odk_tools command wrappers.
    Each ``build_*`` method adds one task row (path inputs plus a Run
    button) to the master frame; the remaining static methods are button
    callbacks and small widget helpers.
    """
    def __init__(self, master=None):
        """
        Set up the GUI by creating controls and adding them to the master frame.
        Controls appear in the GUI in the order they were added. Since many of
        the controls are similar, the repetitive parts of the process are
        abstracted to helper functions. The remaining functions are for input
        validation, or wrappers around commands that are run by clicking the
        button controls.
        :param master: tkinter.Tk (or Frame). Parent window to populate.
        """
        prefs = preferences.Preferences()
        master.title(prefs.app_title)
        ttk.Style().configure('.', font=prefs.font)
        ODKToolsGui.build_generate_xform(master=master, prefs=prefs)
        ODKToolsGui.build_validate_xform(master=master, prefs=prefs)
        ODKToolsGui.build_generate_images(master=master, prefs=prefs)
        ODKToolsGui.build_generate_editions(master=master, prefs=prefs)
        ODKToolsGui.build_output_box(master=master, prefs=prefs)
    @staticmethod
    def build_generate_xform(master, prefs):
        """Setup for the Generate XForm task widgets."""
        master.xlsform_path, xlsform_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="* XLSForm path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.xlsx_browse)
        master.generate_xform = ODKToolsGui.build_action_frame(
            master=master,
            label_text="Generate XForm", label_width=prefs.label_width,
            command=lambda: ODKToolsGui.generate_xform(
                master=master, xlsform_path=xlsform_path),
            pre_msg=prefs.generic_pre_msg.format("Generate XForm"))
    @staticmethod
    def build_validate_xform(master, prefs):
        """Setup for the Validate XForm task widgets."""
        master.sep0 = ttk.Separator(master=master).grid(sticky="we")
        master.xform_in_path, xform_in_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="* XForm path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.xml_browse)
        master.java_path, java_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="Java path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.exe_browse)
        master.validate_path, validate_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="ODK_Validate path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.jar_browse)
        master.validate_xform = ODKToolsGui.build_action_frame(
            master=master,
            label_text="Validate XForm", label_width=prefs.label_width,
            command=lambda: ODKToolsGui.validate_xform(
                master=master, java_path=java_path, validate_path=validate_path,
                xform_path=xform_in_path),
            pre_msg=prefs.generic_pre_msg.format("Validate XForm"))
    @staticmethod
    def build_generate_images(master, prefs):
        """Setup for the Generate Images task widgets."""
        master.sep1 = ttk.Separator(master=master).grid(sticky="we")
        master.xlsform_path_images, xlsform_path_images = \
            ODKToolsGui.build_path_frame(
                master=master,
                label_text="* XLSForm path", label_width=prefs.label_width,
                textbox_width=prefs.textbox_width, browser_kw=prefs.xlsx_browse)
        master.generate_images = ODKToolsGui.build_action_frame(
            master=master,
            label_text="Generate Images", label_width=prefs.label_width,
            command=lambda: ODKToolsGui.generate_images(
                master=master, xlsform_path=xlsform_path_images),
            pre_msg=prefs.generic_pre_msg.format("Generate Images"))
    @staticmethod
    def build_generate_editions(master, prefs):
        """Setup for the Generate Editions task widgets.
        Also creates the trailing separator and the empty ``master.output``
        frame that build_output_box later fills.
        """
        master.sep2 = ttk.Separator(master=master).grid(sticky="we")
        master.xform_sl_path, xform_sl_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="* XForm path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.xml_browse)
        master.sitelangs_path, sitelangs_path = ODKToolsGui.build_path_frame(
            master=master,
            label_text="* Site Languages path", label_width=prefs.label_width,
            textbox_width=prefs.textbox_width, browser_kw=prefs.xlsx_browse)
        master.collect_settings, collect_settings = \
            ODKToolsGui.build_path_frame(
                master=master,
                label_text="Collect Settings path",
                label_width=prefs.label_width,
                textbox_width=prefs.textbox_width,
                browser_kw=prefs.settings_browse)
        # IntVar backing the "nest in odk/forms" checkbox; read at run time.
        nest_in_odk_folders = tkinter.IntVar()
        master.generate_editions = ODKToolsGui.build_action_frame(
            master=master,
            label_text="Generate Editions", label_width=prefs.label_width,
            command=lambda: ODKToolsGui.generate_editions(
                master=master, xform_path=xform_sl_path,
                sitelangs_path=sitelangs_path,
                collect_settings=collect_settings,
                nest_in_odk_folders=nest_in_odk_folders),
            pre_msg=prefs.generic_pre_msg.format("Generate Editions"))
        master.nest_in_odk_folders = ttk.Checkbutton(
            master=master.generate_editions, variable=nest_in_odk_folders,
            text="Nest output in 'odk/forms/*'")
        master.nest_in_odk_folders.grid(row=0, column=2, padx=5)
        master.sep3 = ttk.Separator(master=master).grid(sticky="we")
        master.output = ttk.Frame(master=master, height=10)
        master.output.grid(sticky='w')
        master.output.rowconfigure(index=0, pad=10, weight=1)
    @staticmethod
    def build_output_box(master, prefs):
        """Setup for the task results output box."""
        master.output.row_label = ttk.Label(
            master=master.output, text="Last run output",
            width=prefs.label_width)
        master.output.row_label.grid(row=0, column=0, padx=5, sticky="w")
        master.output.textbox = tkinter.Text(
            master=master.output, width=prefs.textbox_width+10,
            height=prefs.output_height)
        master.output.textbox.config(wrap='word', font=prefs.font)
        master.output.textbox.grid(row=0, column=1, padx=5, columnspan=2)
        master.output.scroll = ttk.Scrollbar(
            master=master.output, command=master.output.textbox.yview)
        master.output.scroll.grid(row=0, column=3, padx=5, pady=5, sticky='ns')
        master.output.textbox['yscrollcommand'] = master.output.scroll.set
    @staticmethod
    def textbox_pre_message(event, message):
        """Clear the output Text field and insert the provided message.
        :param event: tkinter event fired by the pressed Run button.
        :param message: str. Text to show while the task runs.
        """
        # Climb from the pressed button (button -> task frame -> app frame)
        # to reach the shared output textbox.
        event.widget.master.master.output.textbox.delete("1.0", tkinter.END)
        event.widget.master.master.output.textbox.insert(tkinter.END, message)
    @staticmethod
    def build_action_frame(master, label_text, label_width, command,
                           pre_msg=None):
        """
        Generate a frame with a button for executing a command.
        The frame contains a grid row, with 3 columns: a label and a button
        labelled "Run" which executes the command on click.
        The command / function passed in should be a lambda which doesn't
        return or require any input parameters; in the above layout code the
        examples bake in a reference to the relevant variable (bound to a
        control) which is used to run the function.
        So that the user is notified that the task connected to the "Run"
        button has started, once the ButtonPress event fires, the specified
        pre_msg is displayed in the main output textbox. The button's command
        is implicitly attached to the subsequent ButtonRelease event. Refs:
        - http://tcl.tk/man/tcl8.5/TkCmd/bind.htm#M7
        - http://tcl.tk/man/tcl8.5/TkCmd/button.htm#M5
        Parameters.
        :param master: tk.Frame. The parent of the generated frame.
        :param label_text: str. The text to display next to the command button.
        :param label_width: int. How wide the label should be.
        :param command: function. What to do when the button is clicked.
        :param pre_msg: str. Message to display in textbox on button press.
        :return: frame (ttk Frame) containing the label and Run button.
        """
        frame = ttk.Frame(master=master)
        frame.grid(sticky='w')
        frame.rowconfigure(index=0, pad=10, weight=1)
        frame.row_label = ttk.Label(
            master=frame, text=label_text, width=label_width)
        frame.row_label.grid(row=0, column=0, padx=5, sticky="w")
        frame.button = ttk.Button(master=frame, text="Run", command=command)
        frame.button.grid(row=0, column=1, padx=5)
        if pre_msg is not None:
            frame.button.bind(
                sequence="<ButtonPress>", add='+',
                func=partial(ODKToolsGui.textbox_pre_message, message=pre_msg))
        return frame
    @staticmethod
    def build_path_frame(
            master, label_text, label_width, textbox_width, browser_kw,
            dialog_function=tkinter.filedialog.askopenfilename):
        """
        Generate a frame with controls for collecting a file path.
        The frame contains a grid row, with 3 columns: a label, a text box,
        and a button which opens the file explorer which can be used to
        select the file path visually.
        Parameters.
        :param master: tk.Frame. The parent of the generated frame.
        :param label_text: str. The text to display next to the path textbox.
        :param label_width: int. How wide the label should be.
        :param textbox_width: int. How wide the text box should be.
        :param browser_kw: dict. Keyword arguments to pass to the file browser.
        :param dialog_function: File dialog generation function to use.
        :return: path frame (tk Frame), path variable (tk StringVar)
        """
        frame = ttk.Frame(master=master)
        frame.grid()
        frame.rowconfigure(index=0, pad=10, weight=1)
        frame.row_label = ttk.Label(
            master=frame, text=label_text, width=label_width)
        frame.row_label.grid(row=0, column=0, padx=5, sticky="w")
        path = tkinter.StringVar()
        frame.textbox = ttk.Entry(
            master=frame, textvariable=path, width=textbox_width)
        frame.textbox.grid(row=0, column=1, padx=5, columnspan=2)
        frame.browse = ttk.Button(
            master=frame, text="Browse...",
            command=lambda: ODKToolsGui.file_browser(
                browser_kw=browser_kw, target_variable=path,
                dialog_function=dialog_function))
        frame.browse.grid(row=0, column=3, padx=5)
        return frame, path
    @staticmethod
    def file_browser(browser_kw, target_variable, dialog_function):
        """
        Set the target_variable value using a file chooser dialog.
        Parameters.
        :param browser_kw: dict. Passed in to filedialog constructor.
        :param target_variable: tk control. Where should the value be placed.
        :param dialog_function: function to generate the file dialog control.
        """
        target_variable.set(dialog_function(**browser_kw))
    @staticmethod
    def textbox_replace(tk_end, widget, new_text):
        """
        Clear a textbox widget and insert the new text.
        Important! This is specific to "Entry" widgets, for which the start
        index is specified as 0. For "Text" widgets, the start index is instead
        in the form of "row.col" e.g. delete("1.0", END).
        :param tk_end: the tkinter.END constant meaning the final textbox char.
        :param widget: reference to the widget to work with.
        :param new_text: text to insert into the widget.
        :return: None
        """
        if len(widget.get()) != 0:
            widget.delete(0, tk_end)
        widget.insert(tk_end, new_text)
    @staticmethod
    def generate_xform(master, xlsform_path):
        """
        Run the XForm generator, and put the result in the main textbox.
        If the task was run, copy the parameters down into the other task
        path input text boxes. The XForm file is saved adjacent to the XLSForm,
        with the same name except with an XML extension.
        Parameters.
        :param master: tkinter.Frame. Frame where master.output.textbox is.
        :param xlsform_path: tk StringVar. Path to XLSForm to convert.
        """
        result, xform_path_used, xlsform_path_used = generate_xform.wrapper(
            xlsform_path=xlsform_path.get())
        master.output.textbox.insert(tkinter.END, result)
        if xform_path_used is not None:
            tk_end = tkinter.END
            # Propagate the paths used into the other task rows so the user
            # can run the downstream tasks without re-entering them.
            updates = [(master.xlsform_path.textbox, xlsform_path_used),
                       (master.xform_in_path.textbox, xform_path_used),
                       (master.xlsform_path_images.textbox, xlsform_path_used),
                       (master.xform_sl_path.textbox, xform_path_used)]
            for w, t in updates:
                ODKToolsGui.textbox_replace(tk_end=tk_end, widget=w, new_text=t)
    @staticmethod
    def generate_images(master, xlsform_path):
        """
        Run the images generator, and put the result in the main textbox.
        Parameters.
        :param master: tkinter.Frame. Frame where master.output.textbox is.
        :param xlsform_path: tk StringVar. Path to XLSForm to convert.
        """
        result = generate_images.wrapper(
            xlsform_path=xlsform_path.get())
        master.output.textbox.insert(tkinter.END, result)
    @staticmethod
    def validate_xform(master, java_path, validate_path, xform_path):
        """
        Run the XForm validation, and put the result in the main textbox.
        Parameters.
        :param master: tkinter.Frame. Frame where master.output.textbox is.
        :param java_path: tk StringVar. Optional path to java.exe, otherwise
            this is looked for in the envar JAVA_HOME.
        :param validate_path: tk StringVar. Optional path to ODK_Validate.jar.
            This is packaged with the GUI but maybe a different version is
            desired.
        :param xform_path: tk StringVar. Path to XForm to validate.
        """
        result = validate_xform.wrapper(
            java_path=java_path.get(),
            validate_path=validate_path.get(),
            xform_path=xform_path.get())
        master.output.textbox.insert(tkinter.END, result)
    @staticmethod
    def generate_editions(master, xform_path, sitelangs_path, collect_settings,
                          nest_in_odk_folders):
        """
        Run the editions generator, and put the result in the main textbox.
        Parameters.
        :param master: tkinter.Frame. Frame where master.output.textbox is.
        :param xform_path: tk StringVar. Path to XForm to convert.
        :param sitelangs_path: tk StringVar. Path to site languages
            spreadsheet.
        :param collect_settings: tk StringVar. Optional path to
            collect.settings file.
        :param nest_in_odk_folders: tk IntVar. 1=yes, 0=no. Nest output in
            /odk/forms/*.
        """
        result = generate_editions.wrapper(
            xform_path=xform_path.get(),
            sitelangs_path=sitelangs_path.get(),
            collect_settings=collect_settings.get(),
            nest_in_odk_folders=nest_in_odk_folders.get())
        master.output.textbox.insert(tkinter.END, result)
if __name__ == "__main__":
    # Build the Tk root window, attach the ODK tools interface to it, and
    # hand control to the Tk event loop until the window is closed.
    root_window = tkinter.Tk()
    gui = ODKToolsGui(root_window)
    root_window.mainloop()
| {
"content_hash": "5b1f1a761002614cde22c270f6cb452a",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 80,
"avg_line_length": 45.92,
"alnum_prop": 0.6394972623195619,
"repo_name": "lindsay-stevens/odk_tools",
"id": "e5cd476aa0d7267b4e4364015a7350060cbd3404",
"size": "16072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odk_tools/gui/gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147895"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.