text stringlengths 4 1.02M | meta dict |
|---|---|
"""
holdtime.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
# =================================================================== HoldTime
class HoldTime (int):
    """A BGP hold-time value in seconds, wire-encoded as an unsigned 16-bit int."""

    def pack (self):
        """Return the two-byte big-endian encoding of this hold time."""
        return pack('!H', int(self))

    def keepalive (self):
        """Return the keepalive interval: one third of the hold time."""
        return int(self / 3)

    def __len__ (self):
        """Length in bytes of the encoded hold time."""
        return 2
| {
"content_hash": "3a8ed23130d4ab53c0382bc272110031",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 18.45,
"alnum_prop": 0.5528455284552846,
"repo_name": "lochiiconnectivity/exabgp",
"id": "36e3222320447a354a43177c2204c228bcdf4884",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/open/holdtime.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1203596"
},
{
"name": "Shell",
"bytes": "17662"
}
],
"symlink_target": ""
} |
"""Deep Evolution solver.
Uses a neural net to predict the position and
mutation function to apply to a string. Neural net takes the string and predicts
1) [Batch x length] logits over positions in the string
2) [Batch x length x n_mutations] logits over mutation function for every
position in the string.
First, we sample the position from the position logits, take the logits
corresponding to the chosen position and sample the index of the mutation
function to apply to this position in the string. Currently, we apply
one mutation at a time. Finally, update the network parameters using REINFORCE
gradient estimator, where the advantage is the difference between parent and
child rewards. The log-likelihood is the sum of position and mutation
log-likelihoods.
By default, no selection is performed (we continue mutating the same batch,
use_selection_of_best = False). If use_selection_of_best=True, we choose best
samples from the previous batch and sample them with replacement to create
a new batch.
"""
import functools
from absl import logging
import gin
import jax
from jax.example_libraries import stax
from jax.example_libraries.optimizers import adam
import jax.numpy as jnp
import jax.random as jrand
from jax.scipy.special import logsumexp
import numpy as np
from amortized_bo import base_solver
from amortized_bo import data
from amortized_bo import utils
def logsoftmax(x, axis=-1):
  """Return log-softmax of `x`, log-normalized along `axis`."""
  normalizer = logsumexp(x, axis, keepdims=True)
  return x - normalizer
def softmax(x, axis=-1):
  """Return probabilities along `axis`, computed as exp of the log-softmax."""
  return jnp.exp(x - logsumexp(x, axis, keepdims=True))
def one_hot(x, k):
  """Return one-hot vectors of depth `k` for the integer indices in `x`."""
  identity = jnp.eye(k)
  return identity[x]
def gumbel_max_sampler(logits, temperature, rng):
  """Sample from categorical distribution using Gumbel-Max trick.

  Gumbel-Max trick:
  https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
  https://arxiv.org/abs/1411.0030

  Args:
    logits: Unnormalized logits for categorical distribution.
      [batch x n_mutations_to_sample x n_mutation_types]
    temperature: temperature parameter for Gumbel-Max. The lower the
      temperature, the closer the sample is to one-hot-encoding.
    rng: Jax random number generator

  Returns:
    class_assignments: Sampled class assignments
      [batch x n_mutations_to_sample]
  """
  # Normalize the logits
  logits = logsoftmax(logits)
  gumbel_noise = jrand.gumbel(rng, logits.shape)
  # NOTE(review): argmax is invariant to division by a positive temperature
  # and to the softmax below, so `temperature` cannot change the returned
  # sample here — confirm whether soft assignments were meant to be used.
  softmax_logits = (logits + gumbel_noise) / temperature
  soft_assignments = softmax(softmax_logits, -1)
  class_assignments = jnp.argmax(soft_assignments, -1)
  assert len(class_assignments.shape) == 2
  # Output shape: [batch x num_mutations]
  return class_assignments
##########################################
# Mutation-related helper functions
def _mutate_position(structure, pos_mask, fn):
"""Apply mutation fn to position specified by pos_mask."""
structure = np.array(structure).copy()
pos_mask = np.array(pos_mask).astype(int)
structure[pos_mask == 1] = fn(structure[pos_mask == 1])
return structure
def set_pos(x, pos_mask, val):
  """Return a copy of `x` with the positions selected by `pos_mask` set to `val`."""
  assign = lambda _selected: val
  return _mutate_position(x, pos_mask, fn=assign)
def apply_mutations(samples, mutation_types, pos_masks, mutations,
                    use_assignment_mutations=False):
  """Apply the mutations specified by mutation types to the batch of strings.

  Args:
    samples: Batch of strings [batch x str_length]
    mutation_types: IDs of mutation types to be applied to each string
      [Batch x num_mutations]
    pos_masks: One-hot encoding [Batch x num_mutations x str_length]
      of the positions to be mutate in each string.
      "num_mutations" positions will be mutated per string.
    mutations: A list of possible mutation functions.
      Each is invoked as fn(sample, pos_mask) and must return the mutated
      sample (see the per-sample loop below).
    use_assignment_mutations: bool. Whether mutations are defined as
      "Set position X to character C". If use_assignment_mutations=True,
      then vectorize procedure of applying mutations to the string.
      The index of mutation type should be equal to the index of the character.
      Gives considerable speed-up to this function.

  Returns:
    states: Array [num_mutations + 1, batch, str_length]. Entry 0 is the
      original batch; entry i holds the samples with the first i mutations
      applied; entry -1 has all mutations applied.
  """
  batch_size = samples.shape[0]
  assert len(mutation_types) == batch_size
  assert len(pos_masks) == batch_size
  str_length = samples.shape[1]
  assert pos_masks.shape[-1] == str_length
  # Check that number of mutations is consistent in mutation_types and positions
  assert mutation_types.shape[1] == pos_masks.shape[1]
  num_mutations = mutation_types.shape[1]
  # List of batched samples with 0,1,2,... mutations
  # First element of the list contains original samples
  # Last element has samples with all mutations applied to the string
  perturbed_samples_with_i_mutations = [samples]
  for i in range(num_mutations):
    perturbed_samples = []
    samples_to_perturb = perturbed_samples_with_i_mutations[-1]
    if use_assignment_mutations:
      # Vectorized path: one scatter per mutation step. The new character id
      # is the mutation id itself, written where the i-th mask is set.
      perturbed_samples = samples_to_perturb.copy()
      mask = pos_masks[:, i].astype(int)
      # Assumes mutations are defined as "Set position to the character C"
      perturbed_samples[np.array(mask) == 1] = mutation_types[:, i]
    else:
      # Generic path: call the selected mutation callable per sample.
      for j in range(batch_size):
        sample = samples_to_perturb[j].copy()
        pos = pos_masks[j, i]
        mut_id = mutation_types[j, i]
        mutation = mutations[int(mut_id)]
        perturbed_samples.append(mutation(sample, pos))
      perturbed_samples = np.stack(perturbed_samples)
    assert perturbed_samples.shape == samples.shape
    perturbed_samples_with_i_mutations.append(perturbed_samples)
  states = jnp.stack(perturbed_samples_with_i_mutations, 0)
  assert states.shape == (num_mutations + 1,) + samples.shape
  return states
##########################################
# pylint: disable=invalid-name
def OneHot(depth):
  """Stax layer mapping integer inputs to one-hot vectors of size `depth`."""
  def init_fun(rng, input_shape):
    del rng
    # The layer is parameterless; output gains a trailing one-hot axis.
    return input_shape + (depth,), ()
  def apply_fun(params, inputs, **kwargs):
    del params, kwargs
    indices = inputs.astype(int)
    return jnp.eye(depth)[indices]
  return init_fun, apply_fun
def ExpandDims(axis=1):
  """Stax layer inserting a singleton dimension at `axis`.

  Args:
    axis: Position (possibly negative) of the new dimension, following
      `jnp.expand_dims` semantics: axis=-1 appends at the end.

  Returns:
    A stax-style (init_fun, apply_fun) pair.
  """
  def init_fun(rng, input_shape):
    del rng
    input_shape = tuple(input_shape)
    if axis < 0:
      # Normalize a negative axis against the *output* rank (len + 1) so the
      # reported shape matches what jnp.expand_dims does in apply_fun.
      # (The previous `len + 1 - axis` slid past the end of the tuple and
      # always appended the singleton, which was wrong for axis <= -2.)
      new_axis = len(input_shape) + 1 + axis
    else:
      new_axis = axis
    return (input_shape[:new_axis] + (1,) + input_shape[new_axis:]), ()
  def apply_fun(params, inputs, **kwargs):
    del params, kwargs
    return jnp.expand_dims(inputs, axis)
  return init_fun, apply_fun
def AssertNonZeroShape():
  """Identity stax layer asserting that no input dimension has zero length."""
  def init_fun(rng, input_shape):
    del rng
    # Shape and (empty) params pass through unchanged.
    return input_shape, ()
  def apply_fun(params, inputs, **kwargs):
    del params, kwargs
    assert all(dim != 0 for dim in inputs.shape)
    return inputs
  return init_fun, apply_fun
# pylint: enable=invalid-name
def squeeze_layer(axis=1):
  """Stax layer removing the (singleton) dimension at non-negative `axis`."""
  def init_fun(rng, input_shape):
    del rng
    if axis < 0:
      raise ValueError("squeeze_layer: negative axis is not supported")
    out_shape = input_shape[:axis] + input_shape[(axis + 1):]
    return out_shape, ()
  def apply_fun(params, inputs, **kwargs):
    del params, kwargs
    return inputs.squeeze(axis)
  return init_fun, apply_fun
def reduce_layer(reduce_fn=jnp.mean, axis=1):
  """Stax layer applying `reduce_fn` along `axis` of a rank-3 input.

  Args:
    reduce_fn: Reduction accepting an `axis` keyword (e.g. `jnp.mean`).
    axis: Non-negative axis to reduce over.

  Returns:
    A stax-style (init_fun, apply_fun) pair.
  """
  def init_fun(rng, input_shape):
    del rng
    assert axis >= 0
    assert len(input_shape) == 3
    # Drop exactly the reduced axis from the reported output shape.
    # (The previous `input_shape[:axis - 1]` also dropped the axis before it,
    # so for the default axis=1 a (B, L, D) input was reported as (D,)
    # while apply_fun actually produces (B, D).)
    return input_shape[:axis] + input_shape[axis + 1:], ()
  def apply_fun(params, inputs, **kwargs):
    del params, kwargs
    return reduce_fn(inputs, axis=axis)
  return init_fun, apply_fun
def _create_positional_encoding( # pylint: disable=invalid-name
input_shape, max_len=10000):
"""Helper: create positional encoding parameters."""
d_feature = input_shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
div_term = np.exp(np.arange(0, d_feature, 2) * -(np.log(10000.0) / d_feature))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe) # These are trainable parameters, initialized as above.
def positional_encoding():
  """Concatenate positional encoding to the last dimension."""
  def init_fun(rng, input_shape):
    del rng
    input_shape_for_enc = input_shape
    # The sinusoidal table is built once at init time and returned as this
    # layer's parameter tuple (a single entry).
    params = _create_positional_encoding(input_shape_for_enc)
    # Output widens the feature axis by the encoding depth.
    last_dim = input_shape[-1] + params.shape[-1]
    return input_shape[:-1] + (last_dim,), (params,)
  def apply_fun(params, inputs, **kwargs):
    del kwargs
    # Expects rank-4 inputs — presumably [batch x 1 x length x depth] with the
    # singleton axis added by an upstream ExpandDims; TODO confirm.
    assert inputs.ndim == 4
    params = params[0]  # Unpack the single-entry parameter tuple from init_fun.
    symbol_size = inputs.shape[-2]
    # Slice the table to the current sequence length, then tile over batch.
    enc = params[None, :, :symbol_size, :]
    enc = jnp.repeat(enc, inputs.shape[0], 0)
    return jnp.concatenate((inputs, enc), -1)
  return init_fun, apply_fun
def cnn(conv_depth=300,
        kernel_size=5,
        n_conv_layers=2,
        across_batch=False,
        add_pos_encoding=False):
  """Build a convolutional network as a stax (init_fun, apply_fun) pair.

  Input shape: [batch x length x depth]. A singleton axis is inserted before
  the convolutions and squeezed out again afterwards.
  """
  # Where to insert (and later squeeze) the singleton axis.
  extra_dim = 0 if across_batch else 1
  layers = [ExpandDims(axis=extra_dim)]
  if add_pos_encoding:
    layers.append(positional_encoding())
  for _ in range(n_conv_layers):
    layers.extend([
        stax.Conv(conv_depth, (1, kernel_size), padding="same", strides=(1, 1)),
        stax.Relu,
    ])
  layers.append(AssertNonZeroShape())
  layers.append(squeeze_layer(axis=extra_dim))
  return stax.serial(*layers)
def build_model_stax(output_size,
                     n_dense_units=300,
                     conv_depth=300,
                     n_conv_layers=2,
                     n_dense_layers=0,
                     kernel_size=5,
                     across_batch=False,
                     add_pos_encoding=False,
                     mean_over_pos=False,
                     mode="train"):
  """Build a model with convolutional layers followed by dense layers."""
  del mode  # Unused; accepted for interface compatibility.
  conv_stack = cnn(
      conv_depth=conv_depth,
      n_conv_layers=n_conv_layers,
      kernel_size=kernel_size,
      across_batch=across_batch,
      add_pos_encoding=add_pos_encoding)
  layers = [conv_stack]
  for _ in range(n_dense_layers):
    layers.extend([stax.Dense(n_dense_units), stax.Relu])
  layers.append(stax.Dense(output_size))
  if mean_over_pos:
    # Collapse the position axis so the model emits one vector per sample.
    layers.append(reduce_layer(jnp.mean, axis=1))
  return stax.serial(*layers)
def sample_log_probs_top_k(log_probs, rng, temperature=1., k=1):
  """Draw `k` categories per row via the Gumbel-top-k trick on `log_probs`."""
  gumbel = jax.random.gumbel(rng, shape=log_probs.shape)
  scores = (log_probs + gumbel) / temperature
  order = jnp.argsort(scores)
  # The last k indices of the ascending sort are the k largest scores.
  return order[Ellipsis, -k:]
@jax.jit
def gather_positions(idx_to_gather, logits):
  """Collect per-position logits for the given string positions.

  Used for collecting logits for positions in the string (depth = 1) and
  for mutation types (depth = n_mut_types).

  Args:
    idx_to_gather: [batch_size x num_mutations] Indices of the positions
      in the string to gather logits for.
    logits: [batch_size x str_length x depth] Logits to index.

  Returns:
    Logits for the specified positions: [batch_size, num_mutations, depth].
  """
  assert idx_to_gather.shape[0] == logits.shape[0]
  assert idx_to_gather.ndim == 2
  assert logits.ndim == 3
  batch_size, num_mutations = idx_to_gather.shape
  _, str_length, depth = logits.shape
  # One-hot masks over string positions, one per requested index.
  position_masks = jnp.eye(str_length)[idx_to_gather]
  assert position_masks.shape == (batch_size, num_mutations, str_length)
  # Broadcast-multiply masks against logits, then collapse the position axis.
  weighted = position_masks[:, :, :, None] * logits[:, None, :, :]
  gathered = jnp.sum(weighted, axis=-2)
  assert gathered.shape == (batch_size, num_mutations, depth)
  return gathered
class JaxMutationPredictor(object):
  """Implements training and predicting from a Jax model.

  Attributes:
    output_size: Tuple containing the sizes of components to predict
    loss_fn: Loss function, called as
      fn(params, rewards=..., inputs=..., actions=...) with `run_model_fn`
      pre-bound via functools.partial in __init__.
    loss_grad_fn: Gradient of the loss function w.r.t. `params`
    temperature: temperature parameter for Gumbel-Max sampler.
    params: weights of the neural net
    make_state: function to make optimizer state given the network parameters
    rng: Jax random number generator (advanced on every model call)
  """

  def __init__(self,
               vocab_size,
               output_size,
               loss_fn,
               rng,
               temperature=1,
               learning_rate=0.001,
               conv_depth=300,
               n_conv_layers=2,
               n_dense_units=300,
               n_dense_layers=0,
               kernel_size=5,
               across_batch=False,
               add_pos_encoding=False,
               mean_over_pos=False,
               model_fn=build_model_stax):
    self.output_size = output_size
    self.temperature = temperature
    # Setup randomness.
    self.rng = rng
    model_settings = {
        "output_size": output_size,
        "n_dense_units": n_dense_units,
        "n_dense_layers": n_dense_layers,
        "conv_depth": conv_depth,
        "n_conv_layers": n_conv_layers,
        "across_batch": across_batch,
        "kernel_size": kernel_size,
        "add_pos_encoding": add_pos_encoding,
        "mean_over_pos": mean_over_pos,
        "mode": "train"
    }
    # Two jitted forward passes are built from the same settings, differing
    # only in the `mode` flag passed to model_fn.
    self._model_init, model_train = model_fn(**model_settings)
    self._model_train = jax.jit(model_train)
    model_settings["mode"] = "eval"
    _, model_predict = model_fn(**model_settings)
    self._model_predict = jax.jit(model_predict)
    self.rng, subrng = jrand.split(self.rng)
    # Input shape: (batch, length, vocab); -1 marks unknown batch/length.
    _, init_params = self._model_init(subrng, (-1, -1, vocab_size))
    self.params = init_params
    # Setup parameters for model and optimizer
    self.make_state, self._opt_update_state, self._get_params = adam(
        learning_rate)
    self.loss_fn = functools.partial(loss_fn, run_model_fn=self.run_model)
    self.loss_grad_fn = jax.grad(self.loss_fn)
    # Track steps of optimization so far.
    self._step_idx = 0

  def update_step(self, rewards, inputs, actions):
    """Performs a single update step on a batch of samples.

    Args:
      rewards: Batch [batch] of rewards for perturbed samples.
      inputs: Batch [batch x length] of original samples
      actions: actions applied on the samples

    Raises:
      ValueError: if any inputs are the wrong shape.
    """
    grad_update = self.loss_grad_fn(
        self.params,
        rewards=rewards,
        inputs=inputs,
        actions=actions,
    )
    old_params = self.params
    # NOTE(review): a fresh optimizer state is rebuilt from the current params
    # on every call, so Adam's first/second-moment accumulators never carry
    # over between steps — confirm whether this reset is intentional.
    state = self.make_state(old_params)
    state = self._opt_update_state(self._step_idx, grad_update, state)
    self.params = self._get_params(state)
    del old_params, state
    self._step_idx += 1

  def __call__(self, x, mode="eval"):
    """Calls predict function of model.

    Args:
      x: Batch of input samples.
      mode: Mode for running the network: "train" or "eval".
        NOTE(review): currently ignored — the model always runs in "eval"
        mode here.

    Returns:
      A list of tuples (class weights, log likelihood) for each of
      output components predicted by the model.
    """
    return self.run_model(x, self.params, mode="eval")

  def run_model(self, x, params, mode="eval"):
    """Run the Jax model.

    This function is used in __call__ to run the model in "eval" mode
    and in the loss function to run the model in "train" mode.

    Args:
      x: Batch of input samples.
      params: Network parameters
      mode: Mode for running the network: "train" or "eval"

    Returns:
      Jax neural network output.
    """
    if mode == "train":
      model_fn = self._model_train
    else:
      model_fn = self._model_predict
    # Side effect: self.rng advances on every call, even in eval mode.
    self.rng, subrng = jax.random.split(self.rng)
    return model_fn(params, inputs=x, rng=subrng)
#########################################
# Loss function
def reinforce_loss(rewards, log_likelihood):
  """REINFORCE surrogate loss.

  Args:
    rewards: Rewards [batch x num_mutations] for the perturbed samples,
      treated as constants (no gradient flows through them).
    log_likelihood: Log-likelihood [batch x num_mutations] of perturbations.

  Returns:
    Scalar loss.
  """
  # The reward signal is assumed non-differentiable; use the score-function
  # (REINFORCE) estimator.
  rewards = jax.lax.stop_gradient(rewards)
  per_mutation = rewards * log_likelihood
  # Average over applied mutations first, then over the batch.
  per_sample = jnp.mean(per_mutation, 1)
  return -jnp.mean(per_sample, 0)
def compute_entropy(log_probs):
  """Return the mean per-element entropy term of an array of log-probs."""
  probs = stax.softmax(log_probs)
  # Mean over the class axis, then over all remaining axes.
  return -jnp.mean(jnp.mean(probs * log_probs, axis=-1))
def compute_advantage(params, critic_fn, rewards, inputs):
  """Compute the advantage: difference between rewards and predicted value.

  Args:
    params: parameters for the critic neural net
    critic_fn: function to run critic neural net,
      called as critic_fn(inputs, params, mode="train")
    rewards: rewards [batch_size] for the perturbed samples
    inputs: original samples [num_mutations x batch_size x str_length x vocab]

  Returns:
    advantage: [batch_size x num_mutations]
  """
  assert inputs.ndim == 4
  num_mutations, batch_size, str_length, vocab_size = inputs.shape
  # Fold the mutation axis into the batch so the critic sees a flat batch.
  flat_inputs = inputs.reshape(
      (num_mutations * batch_size, str_length, vocab_size))
  baseline = critic_fn(flat_inputs, params, mode="train")
  assert baseline.shape == (num_mutations * batch_size, 1)
  baseline = baseline.reshape((num_mutations, batch_size))
  assert rewards.shape == (batch_size,)
  # Every mutation step shares the same terminal reward.
  tiled_rewards = jnp.repeat(rewards[None, :], num_mutations, 0)
  assert tiled_rewards.shape == (num_mutations, batch_size)
  advantage = jnp.transpose(tiled_rewards - baseline)
  assert advantage.shape == (batch_size, num_mutations)
  return advantage
def value_loss_fn(params, run_model_fn, rewards, inputs, actions=None):
  """Critic regression loss: RMS of the advantage.

  Args:
    params: parameters for the Jax model
    run_model_fn: Jax model to run
    rewards: rewards for the perturbed samples
    inputs: original samples, used as input to the Jax model
    actions: not used

  Returns:
    A scalar loss (root-mean-square of reward minus predicted value).
  """
  del actions
  residual = compute_advantage(params, run_model_fn, rewards, inputs)
  return jnp.sqrt(jnp.mean(jnp.square(residual)))
def split_mutation_predictor_output(output):
  """Split network output into (position log-probs, mutation-type log-probs).

  The last channel of `output` holds position logits; the rest hold
  per-position mutation-type logits. Both halves are log-normalized.
  """
  position_logits = output[:, :, -1]
  mutation_logits = output[:, :, :-1]
  return stax.logsoftmax(position_logits), stax.logsoftmax(mutation_logits)
def run_model_and_compute_reinforce_loss(params,
                                         run_model_fn,
                                         rewards,
                                         inputs,
                                         actions,
                                         n_mutations,
                                         entropy_weight=0.1):
  """Run Jax model and compute REINFORCE loss.

  Jax can compute the gradients of the model only if the model is called inside
  the loss function. Here we call the Jax model, re-compute the log-likelihoods,
  take log-likelihoods of the mutations and positions sampled before in
  _propose function of the solver, and compute the loss.

  Args:
    params: parameters for the Jax model
    run_model_fn: Jax model to run
    rewards: rewards (advantages) for the perturbed samples,
      [batch x num_mutations] (must match the shape of the sampled actions)
    inputs: original samples, used as input to the Jax model
    actions: Tuple (mut_types [Batch], positions [Batch]) of mutation types
      and positions sampled during the _propose() step of evolution solver.
    n_mutations: Number of mutations. Used for one-hot encoding of mutations
    entropy_weight: Weight on the entropy term added to the loss.

  Returns:
    A scalar loss.
  """
  mut_types, positions = actions
  mut_types_one_hot = one_hot(mut_types, n_mutations)
  batch_size, str_length, _ = inputs.shape
  assert mut_types.shape[0] == inputs.shape[0]
  batch_size, num_mutations = mut_types.shape
  assert mut_types.shape == positions.shape
  assert mut_types.shape == rewards.shape
  # Re-run the forward pass under `params` so gradients flow to the policy.
  output = run_model_fn(inputs, params, mode="train")
  pos_log_probs, all_mut_log_probs = split_mutation_predictor_output(output)
  assert pos_log_probs.shape == (batch_size, str_length)
  pos_log_probs = jnp.expand_dims(pos_log_probs, -1)
  # Log-likelihood of each previously-sampled position.
  pos_log_likelihoods = gather_positions(positions, pos_log_probs)
  assert pos_log_likelihoods.shape == (batch_size, num_mutations, 1)
  # Sum over number of positions
  pos_log_likelihoods = jnp.sum(pos_log_likelihoods, -1)
  # all_mut_log_probs shape: [batch_size, str_length, n_mut_types]
  assert all_mut_log_probs.shape[:2] == (batch_size, str_length)
  # Get mutation logits corresponding to the chosen positions
  mutation_logprobs = gather_positions(positions, all_mut_log_probs)
  # Get log probs corresponding to the selected mutations
  mut_log_likelihoods_oh = mutation_logprobs * mut_types_one_hot
  # Sum over mutation types
  mut_log_likelihoods = jnp.sum(mut_log_likelihoods_oh, -1)
  assert mut_log_likelihoods.shape == (batch_size, num_mutations)
  # Joint log-likelihood of (position, mutation) is the sum of both parts.
  joint_log_likelihood = mut_log_likelihoods + pos_log_likelihoods
  assert joint_log_likelihood.shape == (batch_size, num_mutations)
  loss = reinforce_loss(rewards, joint_log_likelihood)
  # Entropy bonus on the mutation distribution encourages exploration.
  loss -= entropy_weight * compute_entropy(mutation_logprobs)
  return loss
############################################
# MutationPredictorSolver
def initialize_uniformly(domain, batch_size, random_state):
  """Draw `batch_size` uniform samples from `domain`, seeded by `random_state`."""
  sampler = domain.sample_uniformly
  return sampler(batch_size, seed=random_state)
@gin.configurable
class MutationPredictorSolver(base_solver.BaseSolver):
  """Choose the mutation operator conditioned on the sample.

  Sample from categorical distribution over available mutation operators
  using Gumbel-Max trick
  """

  def __init__(self,
               domain,
               model_fn=build_model_stax,
               random_state=0,
               **kwargs):
    """Constructs solver.

    Args:
      domain: discrete domain
      model_fn: Function which builds the forward pass of predictor model.
      random_state: Random state to initialize jax & np RNGs.
      **kwargs: kwargs passed to config.
    """
    super(MutationPredictorSolver, self).__init__(
        domain=domain, random_state=random_state, **kwargs)
    self.rng = jrand.PRNGKey(random_state)
    self.rng, rng = jax.random.split(self.rng)
    # Cannot mutate more positions than the string has.
    if self.domain.length < self.cfg.num_mutations:
      logging.warning("Number of mutations to perform per string exceeds string"
                      " length. The number of mutation is set to be equal to "
                      "the string length.")
      self.cfg.num_mutations = self.domain.length

    # Right now the mutations are defined as "Set position X to character C".
    # It allows to vectorize applying mutations to the string and speeds up
    # the solver.
    # If using other types of mutations, set self.use_assignment_mut=False.
    # Mutation index i means "set the position to character i".
    self.mutations = []
    for val in range(self.domain.vocab_size):
      self.mutations.append(functools.partial(set_pos, val=val))
    self.use_assignment_mut = True

    mut_loss_fn = functools.partial(run_model_and_compute_reinforce_loss,
                                    n_mutations=len(self.mutations))

    # Predictor that takes the input string
    # Outputs the weights over the 1) mutations types 2) position in string
    if self.cfg.pretrained_model is None:
      # NOTE(review): the same `rng` key is handed to both the actor and the
      # critic below — confirm whether independent keys were intended.
      self._mut_predictor = self.cfg.predictor(
          vocab_size=self.domain.vocab_size,
          # One extra output channel carries the position logits.
          output_size=len(self.mutations) + 1,
          loss_fn=mut_loss_fn,
          rng=rng,
          model_fn=build_model_stax,
          conv_depth=self.cfg.actor_conv_depth,
          n_conv_layers=self.cfg.actor_n_conv_layers,
          n_dense_units=self.cfg.actor_n_dense_units,
          n_dense_layers=self.cfg.actor_n_dense_layers,
          across_batch=self.cfg.actor_across_batch,
          add_pos_encoding=self.cfg.actor_add_pos_encoding,
          kernel_size=self.cfg.actor_kernel_size,
          learning_rate=self.cfg.actor_learning_rate,
      )
      if self.cfg.use_actor_critic:
        # Scalar value head used as the REINFORCE baseline.
        self._value_predictor = self.cfg.predictor(
            vocab_size=self.domain.vocab_size,
            output_size=1,
            rng=rng,
            loss_fn=value_loss_fn,
            model_fn=build_model_stax,
            mean_over_pos=True,
            conv_depth=self.cfg.critic_conv_depth,
            n_conv_layers=self.cfg.critic_n_conv_layers,
            n_dense_units=self.cfg.critic_n_dense_units,
            n_dense_layers=self.cfg.critic_n_dense_layers,
            across_batch=self.cfg.critic_across_batch,
            add_pos_encoding=self.cfg.critic_add_pos_encoding,
            kernel_size=self.cfg.critic_kernel_size,
            learning_rate=self.cfg.critic_learning_rate,
        )
      else:
        self._value_predictor = None
    else:
      self._mut_predictor, self._value_predictor = self.cfg.pretrained_model

    # Stack of pending (state, action, reward) records awaiting their child
    # rewards before a gradient update can be applied.
    self._data_for_grad_update = []
    self._initialized = False

  def _config(self):
    # Default configuration; values may be overridden via gin / kwargs.
    cfg = super(MutationPredictorSolver, self)._config()
    cfg.update(
        dict(
            predictor=JaxMutationPredictor,
            temperature=1.,
            initialize_dataset_fn=initialize_uniformly,
            elite_set_size=10,
            use_random_network=False,
            exploit_with_best=True,
            use_selection_of_best=False,
            pretrained_model=None,
            # Indicator to BO to pass in previous weights.
            # As implemented in cl/318101597.
            warmstart=True,
            use_actor_critic=False,
            num_mutations=5,
            # Hyperparameters for actor
            actor_learning_rate=0.001,
            actor_conv_depth=300,
            actor_n_conv_layers=1,
            actor_n_dense_units=100,
            actor_n_dense_layers=0,
            actor_kernel_size=5,
            actor_across_batch=False,
            actor_add_pos_encoding=True,
            # Hyperparameters for critic
            critic_learning_rate=0.001,
            critic_conv_depth=300,
            critic_n_conv_layers=1,
            critic_n_dense_units=300,
            critic_n_dense_layers=0,
            critic_kernel_size=5,
            critic_across_batch=False,
            critic_add_pos_encoding=True,
        ))
    return cfg

  def _get_unique(self, samples):
    """Return a Population with structure-level duplicates removed."""
    unique_population = data.Population()
    unique_structures = set()
    for sample in samples:
      hashed_structure = utils.hash_structure(sample.structure)
      if hashed_structure in unique_structures:
        continue
      unique_structures.add(hashed_structure)
      unique_population.add_samples([sample])
    return unique_population

  def _get_best_samples_from_last_batch(self,
                                        population,
                                        n=1,
                                        discard_duplicates=True):
    """Return (structures, rewards) of the best `n` samples of the last batch."""
    best_samples = population.get_last_batch().best_n(
        n, discard_duplicates=discard_duplicates)
    return best_samples.structures, best_samples.rewards

  def _select(self, population):
    """Pick the batch of parent structures (and rewards) to perturb next."""
    if self.cfg.use_selection_of_best:
      # Choose best samples from the previous batch
      structures, rewards = self._get_best_samples_from_last_batch(
          population, self.cfg.elite_set_size)
      # Choose the samples to perturb with replacement
      idx = np.random.choice(len(structures), self.batch_size, replace=True)  # pytype: disable=attribute-error  # trace-all-classes
      selected_structures = np.stack([structures[i] for i in idx])
      selected_rewards = np.stack([rewards[i] for i in idx])
      return selected_structures, selected_rewards
    else:
      # Just return the samples from the previous batch -- no selection
      last_batch = population.get_last_batch()
      structures = np.array([x.structure for x in last_batch])
      rewards = np.array([x.reward for x in last_batch])
      if len(last_batch) > self.batch_size:  # pytype: disable=attribute-error  # trace-all-classes
        # Subsample the data
        idx = np.random.choice(len(last_batch), self.batch_size, replace=False)  # pytype: disable=attribute-error  # trace-all-classes
        structures = np.stack([structures[i] for i in idx])
        rewards = np.stack([rewards[i] for i in idx])
      return structures, rewards

  def propose(self, num_samples, population=None, pending_samples=None):
    """Return `num_samples` new proposals, training the predictor on the way."""
    # Initialize population randomly.
    if self._initialized and population:
      if num_samples != self.batch_size:
        raise ValueError("Must maintain constant batch size between runs.")
      counter = population.max_batch_index
      if counter > 0:
        # Child rewards for the previous proposals are now known; update.
        if not self.cfg.use_random_network:
          self._update_params(population)
    else:
      # First call: fix the batch size and return a random initial batch.
      self.batch_size = num_samples
      self._initialized = True
      return self.cfg.initialize_dataset_fn(
          self.domain, num_samples, random_state=self._random_state)

    # Choose best samples so far -- [elite_set_size]
    samples_to_perturb, parent_rewards = self._select(population)

    perturbed, actions, mut_predictor_input = self._perturb(samples_to_perturb)
    if not self.cfg.use_random_network:
      # Defer the gradient step until the children's rewards are observed.
      self._data_for_grad_update.append({
          "batch_index": population.current_batch_index + 1,
          "mut_predictor_input": mut_predictor_input,
          "actions": actions,
          "parent_rewards": parent_rewards,
      })
    return np.asarray(perturbed)

  def _perturb(self, parents, mode="train"):
    """Sample positions and mutation types for `parents` and apply them.

    NOTE(review): the `mode` argument is unused here — confirm intent.
    """
    length = parents.shape[1]
    assert length == self.domain.length
    parents_one_hot = one_hot(parents, self.domain.vocab_size)
    output = self._mut_predictor(parents_one_hot)
    pos_log_probs, all_mut_log_probs = split_mutation_predictor_output(output)

    self.rng, subrng = jax.random.split(self.rng)
    # Sample num_mutations positions per string (Gumbel top-k).
    positions = sample_log_probs_top_k(
        pos_log_probs,
        subrng,
        k=self.cfg.num_mutations,
        temperature=self.cfg.temperature)
    pos_masks = one_hot(positions, length)

    # Mutation-type logits at the sampled positions.
    mutation_logprobs = gather_positions(positions, all_mut_log_probs)
    assert mutation_logprobs.shape == (output.shape[0], self.cfg.num_mutations,
                                       output.shape[-1] - 1)
    self.rng, subrng = jax.random.split(self.rng)
    mutation_types = gumbel_max_sampler(
        mutation_logprobs, self.cfg.temperature, subrng)

    states = apply_mutations(parents, mutation_types, pos_masks, self.mutations,
                             use_assignment_mutations=self.use_assignment_mut)
    # states shape: [num_mutations+1, batch, str_length]
    # states[0] are original samples with no mutations
    # states[-1] are strings with all mutations applied to them
    states_oh = one_hot(states, self.domain.vocab_size)
    # states_oh shape: [n_mutations+1, batch, str_length, vocab_size]
    perturbed = states[-1]
    return perturbed, (mutation_types, positions), states_oh

  def _update_params(self, population):
    """Apply one policy (and optionally critic) gradient step.

    Consumes the most recent record pushed by propose(); the advantage is
    either critic-based or the child-minus-parent reward difference.
    """
    if not self._data_for_grad_update:
      return

    dat = self._data_for_grad_update.pop()
    assert dat["batch_index"] == population.current_batch_index

    child_rewards = jnp.array(population.get_last_batch().rewards)
    parent_rewards = dat["parent_rewards"]
    all_states = dat["mut_predictor_input"]
    # all_states shape: [num_mutations, batch_size, str_length, vocab_size]

    # TODO(rubanova): rescale the rewards
    terminal_rewards = child_rewards

    if self.cfg.use_actor_critic:
      # Update the value function
      # Compute the difference between predicted value of intermediate states
      # and the final reward.
      self._value_predictor.update_step(
          rewards=terminal_rewards,
          inputs=all_states[:-1],
          actions=None,
      )
      advantage = compute_advantage(self._value_predictor.params,
                                    self._value_predictor.run_model,
                                    terminal_rewards, all_states[:-1])
    else:
      # Baseline-free advantage, broadcast to every mutation step.
      advantage = child_rewards - parent_rewards
      advantage = jnp.repeat(advantage[:, None], self.cfg.num_mutations, 1)

    advantage = jax.lax.stop_gradient(advantage)

    # Perform policy update.
    # Compute policy on the original samples, like in _perturb function.
    self._mut_predictor.update_step(
        rewards=advantage,
        inputs=all_states[0],
        actions=dat["actions"])

    del all_states, advantage

  @property
  def trained_model(self):
    # Exposed so BO can warm-start a new solver from these weights.
    return (self._mut_predictor, self._value_predictor)
| {
"content_hash": "fceafbb368496d7a2f7ef0b3d3a70e4e",
"timestamp": "",
"source": "github",
"line_count": 963,
"max_line_length": 135,
"avg_line_length": 34.48494288681204,
"alnum_prop": 0.6573820349905146,
"repo_name": "google-research/google-research",
"id": "8e13469b68c5106432001be3f0eb73a8496100c0",
"size": "33817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amortized_bo/deep_evolution_solver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): `initial = True` on a second (0002) migration is unusual;
    # presumably intentional for a squashed initial state -- confirm.
    initial = True
    dependencies = [
        ('sites', '0001_initial'),
        ('webquills', '0001_initial'),
    ]
    operations = [
        # Adds an optional default-author FK to Site; SET_NULL keeps the
        # site row alive when the referenced author is deleted.
        migrations.AddField(
            model_name='site',
            name='author',
            field=models.ForeignKey(blank=True, help_text='Default author for any page without an explicit author', null=True, on_delete=django.db.models.deletion.SET_NULL, to='webquills.author', verbose_name='author'),
        ),
    ]
| {
"content_hash": "f18074db0d347e9eed5168d6a26536f4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 219,
"avg_line_length": 29.05,
"alnum_prop": 0.6333907056798623,
"repo_name": "veselosky/webquills",
"id": "0d153b77c0912822c264418e66101b556dfd983e",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "webquills/sites/migrations/0002_site_author.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21709"
},
{
"name": "HTML",
"bytes": "12296"
},
{
"name": "Python",
"bytes": "135964"
},
{
"name": "Shell",
"bytes": "3242"
}
],
"symlink_target": ""
} |
import unittest
import mock
from ...haystack.utils import AsyncIndexAdapter
class AsyncIndexAdapterTestCase(unittest.TestCase):
    """Tests that AsyncIndexAdapter delegates index updates/removals to
    HaystackActionTask.delay with the object's haystack identifier.

    Fix: the second test method name had a typo ("deya") and was missing
    "call_", making it inconsistent with its sibling test's naming.
    """

    @mock.patch('libs.haystack.utils.get_identifier')
    @mock.patch('libs.haystack.utils.HaystackActionTask')
    def test_remove_object_should_call_haystack_action_task_delay_with_remove_action_and_obj_identifier(
            self, haystack_action_task_class, get_identifier):
        # setup
        obj = mock.Mock()
        # action
        AsyncIndexAdapter.remove_object(obj)
        # assert: identifier is computed from the object, and the task is
        # queued with (REMOVE_ACTION, identifier).
        self.assertTupleEqual((obj,), get_identifier.call_args[0])
        self.assertTupleEqual((haystack_action_task_class.REMOVE_ACTION,
                               get_identifier.return_value,),
                              haystack_action_task_class.delay.call_args[0])

    @mock.patch('libs.haystack.utils.get_identifier')
    @mock.patch('libs.haystack.utils.HaystackActionTask')
    def test_update_object_should_call_haystack_action_task_delay_with_update_action_and_obj_identifier(
            self, haystack_action_task_class, get_identifier):
        # setup
        obj = mock.Mock()
        # action
        AsyncIndexAdapter.update_object(obj)
        # assert: identifier is computed from the object, and the task is
        # queued with (UPDATE_ACTION, identifier).
        self.assertTupleEqual((obj,), get_identifier.call_args[0])
        self.assertTupleEqual((haystack_action_task_class.UPDATE_ACTION,
                               get_identifier.return_value,),
                              haystack_action_task_class.delay.call_args[0])
| {
"content_hash": "6de6a7c36b6a301cbd3a12f294015f0f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 104,
"avg_line_length": 39.22222222222222,
"alnum_prop": 0.6805949008498584,
"repo_name": "hellhound/dentexchange",
"id": "a5dea2072d81ee9b50ae936fab03c5cf0b047e3d",
"size": "1435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dentexchange/apps/libs/tests/haystack/test_async_index_adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6611"
},
{
"name": "JavaScript",
"bytes": "23966"
},
{
"name": "Python",
"bytes": "563289"
},
{
"name": "Shell",
"bytes": "2274"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes `video_url` optional (blank, empty default) on both the live
    # Proposal model and its history-tracking mirror, keeping them in sync.
    dependencies = [
        ("proposals", "0027_auto_20200502_0540"),
    ]
    operations = [
        migrations.AlterField(
            model_name="historicalproposal",
            name="video_url",
            field=models.URLField(
                blank=True,
                default="",
                help_text="Short 1-2 min video describing your talk",
            ),
        ),
        migrations.AlterField(
            model_name="proposal",
            name="video_url",
            field=models.URLField(
                blank=True,
                default="",
                help_text="Short 1-2 min video describing your talk",
            ),
        ),
    ]
| {
"content_hash": "f8bbba45536699d6b7670593b692f076",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 25.903225806451612,
"alnum_prop": 0.5093399750933998,
"repo_name": "pythonindia/junction",
"id": "2fea0f83bd9fbda31d0c3ae5d0d65dcf09c9d92b",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/proposals/migrations/0028_auto_20200617_2337.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190844"
},
{
"name": "HTML",
"bytes": "161794"
},
{
"name": "JavaScript",
"bytes": "49000"
},
{
"name": "Python",
"bytes": "379163"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
} |
"""
Response functions and CFProjection response functions (see projfn.py) written
in C to optimize performance.
Requires the weave package; without it unoptimized versions are used.
$Id$
"""
__version__='$Revision$'
import param
from topo.base.functionfamily import ResponseFn,DotProduct
from topo.base.cf import CFPResponseFn, CFPRF_Plugin
from topo.misc.inlinec import inline,provide_unoptimized_equivalent,\
c_header,c_decorators
from topo.misc.pyxhandler import provide_unoptimized_equivalent_cy
from topo.responsefn.projfn import CFPRF_EuclideanDistance # pyflakes:ignore (optimized version provided)
# CEBALERT: this function works for 1D arrays; the docstring below is
# out of date. Need to update for this and other optimized fns that
# have been flattened.
class CFPRF_DotProduct_opt(CFPResponseFn):
    """
    Dot-product response function.
    Written in C for a manyfold speedup; see CFPRF_DotProduct for an
    easier-to-read version in Python. The unoptimized Python version
    is equivalent to this one, but it also works for 1D arrays.
    """
    # Fixed to DotProduct and readonly: the C code below hard-codes the
    # dot-product computation, so the single_cf_fn cannot be swapped out.
    single_cf_fn = param.ClassSelector(ResponseFn,DotProduct(),readonly=True)
    def __call__(self, iterator, input_activity, activity, strength, **params):
        # Bind everything the inline C block reads into plain locals; weave
        # passes them by name via the variable list given to inline() below.
        temp_act = activity # pyflakes:ignore (passed to weave C code)
        irows,icols = input_activity.shape
        X = input_activity.ravel() # pyflakes:ignore (passed to weave C code)
        cfs = iterator.flatcfs
        num_cfs = len(cfs) # pyflakes:ignore (passed to weave C code)
        mask = iterator.mask.data # pyflakes:ignore (passed to weave C code)
        cf_type = iterator.cf_type # pyflakes:ignore (passed to weave C code)
        # Note: no performance hit from array indexing of mask and
        # temp_act (r11447).
        # The C source is a runtime string; macros like DECLARE_SLOT_OFFSET
        # come from c_header, and %(cfs_loop_pragma)s from c_decorators.
        code = c_header + """
        DECLARE_SLOT_OFFSET(weights,cf_type);
        DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
        %(cfs_loop_pragma)s
        for (int r=0; r<num_cfs; ++r) {
            if(mask[r] == 0.0) {
                temp_act[r] = 0;
            } else {
                PyObject *cf = PyList_GetItem(cfs,r);
                // CONTIGUOUS_ARRAY_FROM_SLOT_OFFSET(float,weights,cf) <<<<<<<<<<<
                LOOKUP_FROM_SLOT_OFFSET_UNDECL_DATA(float,weights,cf);
                char *data = weights_obj->data;
                int s0 = weights_obj->strides[0];
                int s1 = weights_obj->strides[1];
                LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
                UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);
                double tot = 0.0;
                npfloat *xj = X+icols*rr1+cc1;
                // computes the dot product
                for (int i=rr1; i<rr2; ++i) {
                    npfloat *xi = xj;
                    // float *wi = weights;
                    // for (int j=cc1; j<cc2; ++j) {
                    //     tot += *wi * *xi;
                    //     ++wi;
                    //     ++xi;
                    // }
                    for (int j=cc1; j<cc2; ++j) {
                        tot += *((float *)(data + (i-rr1)*s0 + (j-cc1)*s1)) * *xi;
                        ++xi;
                    }
                    xj += icols;
                    // weights += cc2-cc1;
                }
                temp_act[r] = tot*strength;
                // DECREF_CONTIGUOUS_ARRAY(weights);
            }
        }
        """%c_decorators
        # Compile-and-run via weave; results are written in place into
        # temp_act (i.e. the caller's `activity` array).
        inline(code, ['mask','X', 'strength', 'icols', 'temp_act','cfs','num_cfs','cf_type'],
               local_dict=locals(), headers=['<structmember.h>'])
class CFPRF_DotProduct(CFPRF_Plugin):
    """
    Plain-Python dot-product response function.

    Thin wrapper allowing a transparent fallback when the optimized
    version is unavailable; equivalent to
    CFPRF_Plugin(single_cf_fn=DotProduct()).
    """
    # CB: should probably have single_cf_fn here & readonly
    def __init__(self, **params):
        dot_product_fn = DotProduct()
        super(CFPRF_DotProduct, self).__init__(
            single_cf_fn=dot_product_fn, **params)
# Substitute the plain-Python CFPRF_DotProduct for the weave-optimized
# version when the optimized machinery is unavailable.
provide_unoptimized_equivalent("CFPRF_DotProduct_opt","CFPRF_DotProduct",locals())
try:
    from optimized_cy import CFPRF_DotProduct_cyopt # pyflakes:ignore (optimized version)
# NOTE(review): bare except deliberately swallows any failure so the Cython
# build stays strictly optional; consider narrowing to ImportError.
except:
    pass
provide_unoptimized_equivalent_cy("CFPRF_DotProduct_cyopt","CFPRF_DotProduct",locals())
# CEBERRORALERT: ignores the sheet mask!
class CFPRF_EuclideanDistance_opt(CFPResponseFn):
    """
    Euclidean-distance response function.
    Written in C for a several-hundred-times speedup; see
    CFPRF_EuclideanDistance for an easier-to-read (but otherwise
    equivalent) version in Python.
    """
    def __call__(self, iterator, input_activity, activity, strength, **params):
        # Bind everything the inline C block reads into plain locals; weave
        # passes them by name via the variable list given to inline() below.
        temp_act = activity # pyflakes:ignore (passed to weave C code)
        rows,cols = activity.shape
        irows,icols = input_activity.shape
        X = input_activity.ravel() # pyflakes:ignore (passed to weave C code)
        cfs = iterator.flatcfs
        num_cfs = len(cfs) # pyflakes:ignore (passed to weave C code)
        # First pass computes each CF's distance (tracking the maximum);
        # second pass rescales so larger activity means smaller distance.
        code = c_header + """
        #include <math.h>
        npfloat *tact = temp_act;
        double max_dist=0.0;
        for (int r=0; r<num_cfs; ++r) {
            PyObject *cf = PyList_GetItem(cfs,r);
            PyObject *weights_obj = PyObject_GetAttrString(cf,"weights");
            PyObject *slice_obj = PyObject_GetAttrString(cf,"input_sheet_slice");
            float *wj = (float *)(((PyArrayObject*)weights_obj)->data);
            int *slice = (int *)(((PyArrayObject*)slice_obj)->data);
            int rr1 = *slice++;
            int rr2 = *slice++;
            int cc1 = *slice++;
            int cc2 = *slice;
            npfloat *xj = X+icols*rr1+cc1;
            // computes the dot product
            double tot = 0.0;
            for (int i=rr1; i<rr2; ++i) {
                npfloat *xi = xj;
                float *wi = wj;
                for (int j=cc1; j<cc2; ++j) {
                    double diff = *wi - *xi;
                    tot += diff*diff;
                    ++wi;
                    ++xi;
                }
                xj += icols;
                wj += cc2-cc1;
            }
            double euclidean_distance = sqrt(tot);
            if (euclidean_distance>max_dist)
                max_dist = euclidean_distance;
            *tact = euclidean_distance;
            ++tact;
            // Anything obtained with PyObject_GetAttrString must be explicitly freed
            Py_DECREF(weights_obj);
            Py_DECREF(slice_obj);
        }
        tact = temp_act;
        for (int r=0; r<num_cfs; ++r) {
            *tact = strength*(max_dist - *tact);
            ++tact;
        }
        """
        # Results are written in place into temp_act (the caller's array).
        inline(code, ['X', 'strength', 'icols', 'temp_act','cfs','num_cfs'],
               local_dict=locals())
provide_unoptimized_equivalent("CFPRF_EuclideanDistance_opt","CFPRF_EuclideanDistance",locals())
| {
"content_hash": "55ee6bb0716e31783b36ff4ae14084b1",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 106,
"avg_line_length": 36.505050505050505,
"alnum_prop": 0.5302988378527946,
"repo_name": "ioam/svn-history",
"id": "94b285525d6dcfb2ad75cecea8f0395ff0b8a50c",
"size": "7228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topo/responsefn/optimized.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "Emacs Lisp",
"bytes": "21378"
},
{
"name": "JavaScript",
"bytes": "12602"
},
{
"name": "PHP",
"bytes": "596890"
},
{
"name": "Perl",
"bytes": "43403"
},
{
"name": "Python",
"bytes": "3334771"
},
{
"name": "Shell",
"bytes": "9260"
},
{
"name": "Tcl",
"bytes": "433956"
}
],
"symlink_target": ""
} |
from marshmallow import Schema, fields
class MovieSchema(Schema):
    """Marshmallow (de)serialization schema for a movie."""
    # pk and likes are server-assigned: serialized on output only
    # (dump_only); title is the only field accepted and required on input.
    pk = fields.Str(dump_only=True)
    title = fields.Str(required=True)
    likes = fields.Int(dump_only=True)
# Reusable module-level schema instances: single movie and list-of-movies.
movie_schema = MovieSchema()
movies_schema = MovieSchema(many=True)
| {
"content_hash": "02ca8a65dcb06fccca0f0edfbb22d534",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.7290836653386454,
"repo_name": "pragmaticcoders/horse",
"id": "fa9c4607a118785de5d231dc1ccadeec1264839d",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horse/web/schemas/movie.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31460"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest2
import stripe
from mock import patch
from stripe.test.helper import (StripeTestCase, NOW, DUMMY_CHARGE, DUMMY_CARD)
class FunctionalTests(StripeTestCase):
    """Integration tests run against the Stripe API, repeated once per HTTP
    backend via the request_client attribute overridden in subclasses."""
    request_client = stripe.http_client.Urllib2Client
    def setUp(self):
        super(FunctionalTests, self).setUp()
        def get_http_client(*args, **kwargs):
            return self.request_client(*args, **kwargs)
        # Force the library to build its default client from
        # self.request_client so each subclass exercises one backend.
        self.client_patcher = patch(
            'stripe.http_client.new_default_http_client')
        client_mock = self.client_patcher.start()
        client_mock.side_effect = get_http_client
    def tearDown(self):
        super(FunctionalTests, self).tearDown()
        self.client_patcher.stop()
    def test_dns_failure(self):
        # Unresolvable api_base must surface as APIConnectionError;
        # restore the real api_base afterwards.
        api_base = stripe.api_base
        try:
            stripe.api_base = 'https://my-invalid-domain.ireallywontresolve/v1'
            self.assertRaises(stripe.error.APIConnectionError,
                              stripe.Customer.create)
        finally:
            stripe.api_base = api_base
    def test_run(self):
        # Create-then-refund round trip on a charge.
        charge = stripe.Charge.create(**DUMMY_CHARGE)
        self.assertFalse(charge.refunded)
        charge.refund()
        self.assertTrue(charge.refunded)
    def test_refresh(self):
        # refresh() re-fetches from the server, discarding local attrs.
        charge = stripe.Charge.create(**DUMMY_CHARGE)
        charge2 = stripe.Charge.retrieve(charge.id)
        self.assertEqual(charge2.created, charge.created)
        charge2.junk = 'junk'
        charge2.refresh()
        self.assertRaises(AttributeError, lambda: charge2.junk)
    def test_list_accessors(self):
        # Item access and attribute access are interchangeable.
        customer = stripe.Customer.create(card=DUMMY_CARD)
        self.assertEqual(customer['created'], customer.created)
        customer['foo'] = 'bar'
        self.assertEqual(customer.foo, 'bar')
    def test_raise(self):
        # An expired card must raise CardError.
        EXPIRED_CARD = DUMMY_CARD.copy()
        EXPIRED_CARD['exp_month'] = NOW.month - 2
        EXPIRED_CARD['exp_year'] = NOW.year - 2
        self.assertRaises(stripe.error.CardError, stripe.Charge.create,
                          amount=100, currency='usd', card=EXPIRED_CARD)
    def test_response_headers(self):
        # Errors carry the request id from the response headers.
        EXPIRED_CARD = DUMMY_CARD.copy()
        EXPIRED_CARD['exp_month'] = NOW.month - 2
        EXPIRED_CARD['exp_year'] = NOW.year - 2
        try:
            stripe.Charge.create(amount=100, currency='usd', card=EXPIRED_CARD)
            self.fail('charge creation with expired card did not fail')
        except stripe.error.CardError as e:
            self.assertTrue(e.request_id.startswith('req_'))
    def test_unicode(self):
        # Make sure unicode requests can be sent
        self.assertRaises(stripe.error.InvalidRequestError,
                          stripe.Charge.retrieve,
                          id='☃')
    def test_none_values(self):
        # None-valued params are dropped, not serialized.
        customer = stripe.Customer.create(plan=None)
        self.assertTrue(customer.id)
    def test_missing_id(self):
        # refresh() without an id must fail client-side.
        customer = stripe.Customer()
        self.assertRaises(stripe.error.InvalidRequestError, customer.refresh)
class RequestsFunctionalTests(FunctionalTests):
    # Same test suite driven through the `requests`-based HTTP client.
    request_client = stripe.http_client.RequestsClient
class UrlfetchFunctionalTests(FunctionalTests):
    # Same suite via Google App Engine's urlfetch; note request_client is a
    # string sentinel here rather than a client class.
    request_client = 'urlfetch'
    def setUp(self):
        # Skip the whole suite outside of App Engine.
        if stripe.http_client.urlfetch is None:
            self.skipTest(
                '`urlfetch` from Google App Engine is unavailable.')
        else:
            super(UrlfetchFunctionalTests, self).setUp()
class PycurlFunctionalTests(FunctionalTests):
    def setUp(self):
        # Pycurl runs are opt-in (env flag) and Python 2 only.
        if not os.environ.get('STRIPE_TEST_PYCURL'):
            self.skipTest('Pycurl skipped as STRIPE_TEST_PYCURL is not set')
        if sys.version_info >= (3, 0):
            self.skipTest('Pycurl is not supported in Python 3')
        else:
            super(PycurlFunctionalTests, self).setUp()
    request_client = stripe.http_client.PycurlClient
class AuthenticationErrorTest(StripeTestCase):
    def test_invalid_credentials(self):
        """A bogus API key must yield a 401 AuthenticationError."""
        key = stripe.api_key
        try:
            stripe.api_key = 'invalid'
            stripe.Customer.create()
        except stripe.error.AuthenticationError as e:
            self.assertEqual(401, e.http_status)
            self.assertTrue(isinstance(e.http_body, str))
            self.assertTrue(isinstance(e.json_body, dict))
            # Note that an invalid API key bypasses many of the standard
            # facilities in the API server so currently no Request ID is
            # returned.
        finally:
            # Always restore the real key for subsequent tests.
            stripe.api_key = key
class CardErrorTest(StripeTestCase):
    """Checks the metadata attached to a declined-card CardError."""

    def test_declined_card_props(self):
        card = DUMMY_CARD.copy()
        card['exp_month'] = NOW.month - 2
        card['exp_year'] = NOW.year - 2
        try:
            stripe.Charge.create(amount=100, currency='usd', card=card)
        except stripe.error.CardError as err:
            self.assertEqual(402, err.http_status)
            self.assertIsInstance(err.http_body, str)
            self.assertIsInstance(err.json_body, dict)
            self.assertTrue(err.request_id.startswith('req_'))
class InvalidRequestErrorTest(StripeTestCase):
    """Checks the metadata attached to InvalidRequestError responses."""

    def _check_error(self, err, expected_status):
        # Shared assertions on the error's HTTP metadata.
        self.assertEqual(expected_status, err.http_status)
        self.assertIsInstance(err.http_body, str)
        self.assertIsInstance(err.json_body, dict)
        self.assertTrue(err.request_id.startswith('req_'))

    def test_nonexistent_object(self):
        try:
            stripe.Charge.retrieve('invalid')
        except stripe.error.InvalidRequestError as err:
            self._check_error(err, 404)

    def test_invalid_data(self):
        try:
            stripe.Charge.create()
        except stripe.error.InvalidRequestError as err:
            self._check_error(err, 400)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest2.main()
| {
"content_hash": "5dc5701bed759cb07d20311543ef7da3",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 79,
"avg_line_length": 34.12,
"alnum_prop": 0.6295427901524033,
"repo_name": "colehertz/Stripe-Tester",
"id": "b1f5a124f57403ca5247a58cd9869190db30e6ca",
"size": "5997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/stripe/test/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1413550"
},
{
"name": "Shell",
"bytes": "3672"
}
],
"symlink_target": ""
} |
from rx import Lock
from rx.abstractobserver import AbstractObserver
from rx.disposables import SerialDisposable
class ScheduledObserver(AbstractObserver):
    """Observer that queues incoming notifications and drains them on a
    scheduler, so downstream on_next/on_error/on_completed calls run on
    scheduler time rather than inline with the producer."""
    def __init__(self, scheduler, observer):
        super(ScheduledObserver, self).__init__(self._next, self._error, self._completed)
        self.scheduler = scheduler
        self.observer = observer
        self.lock = Lock()
        self.is_acquired = False   # True while a drain is scheduled/running
        self.has_faulted = False   # set once a downstream handler raised
        self.queue = []            # pending zero-arg actions, FIFO
        self.disposable = SerialDisposable()
    # Note to self: list append is thread safe
    # http://effbot.org/pyfaq/what-kinds-of-global-value-mutation-are-thread-safe.htm
    def _next(self, value):
        # Defer the downstream on_next by queueing a closure over `value`.
        def action():
            self.observer.on_next(value)
        self.queue.append(action)
    def _error(self, exception):
        # Defer the downstream on_error.
        def action():
            self.observer.on_error(exception)
        self.queue.append(action)
    def _completed(self):
        # Defer the downstream on_completed.
        def action():
            self.observer.on_completed()
        self.queue.append(action)
    def ensure_active(self):
        """Schedule a drain of the queue unless one is already active."""
        is_owner = False
        with self.lock:
            # Only become the owner when work is pending, nothing has
            # faulted, and no other drain is in flight.
            if not self.has_faulted and len(self.queue):
                is_owner = not self.is_acquired
                self.is_acquired = True
        if is_owner:
            self.disposable.disposable = self.scheduler.schedule_recursive(self.run)
    def run(self, recurse, state):
        """Drain one queued action, then re-schedule via recurse()."""
        parent = self
        with self.lock:
            if len(parent.queue):
                work = parent.queue.pop(0)
            else:
                # Queue drained: release ownership and stop recursing.
                parent.is_acquired = False
                return
        try:
            work()
        except Exception:
            # A faulted observer drops all pending work and stops draining;
            # the exception propagates to the scheduler.
            with self.lock:
                parent.queue = []
                parent.has_faulted = True
            raise
        recurse()
    def dispose(self):
        super(ScheduledObserver, self).dispose()
        self.disposable.dispose()
| {
"content_hash": "d2a0dd37668309964b40fbf39d4095ad",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 89,
"avg_line_length": 27.65714285714286,
"alnum_prop": 0.574896694214876,
"repo_name": "dbrattli/RxPY",
"id": "e3b7e2bc91b90d1231b823754ffd8ae477425577",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rx/scheduledobserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
import requests
from registryctl.common.exceptions import *
REGISTRY_HEADER_V2 = 'application/vnd.docker.distribution.manifest.v2+json'
class AuthClient(object):
    """Minimal authenticated HTTP client for the Docker registry v2 API.

    Holds one requests.Session plus optional basic-auth credentials; the
    v2 manifest Accept header is sent on HEAD requests.
    """

    def __init__(self, url, auth=None, commands=None):
        self._url = url
        self._session = requests.Session()
        self._basic_auth = auth
        self._header_v2 = {'Accept': REGISTRY_HEADER_V2}

    def _get(self, url):
        # Plain GET with basic auth.
        return self._session.get(url=url, auth=self._basic_auth)

    def _head(self, url, header):
        # HEAD with the v2 manifest Accept header; returns the value of one
        # response header, raising NotFoundException on a 404.
        response = self._session.head(url=url,
                                      auth=self._basic_auth,
                                      headers=self._header_v2)
        if response.status_code == 404:
            raise NotFoundException()
        return response.headers[header]

    def _delete(self, url):
        # Plain DELETE with basic auth.
        return self._session.delete(url=url, auth=self._basic_auth)
| {
"content_hash": "d6e1f1cd0bfbeb433f479ed3b25fcdfc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.5948081264108352,
"repo_name": "Flav35/registryctl",
"id": "c20499f7cfe77b52b076738b27e824998ad8bffb",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registryctl/common/authclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "211"
},
{
"name": "Python",
"bytes": "8562"
}
],
"symlink_target": ""
} |
class RelationPredictionOracle:
    """Oracle baseline that "predicts" the gold relation labels.

    Training is a no-op: update() always reports 0, and predictions are
    read straight off the batch.
    """

    def __init__(self):
        pass

    def initialize(self):
        """No state to set up for the oracle."""
        pass

    def update(self, batch):
        """Pretend to train; the oracle has nothing to learn (returns 0)."""
        return 0

    def predict_batch(self, batch):
        """Return the gold relation class labels from the batch."""
        return batch.get_relation_class_labels()
"content_hash": "5ab543151589b1599174831f47c14710",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 18.466666666666665,
"alnum_prop": 0.6101083032490975,
"repo_name": "MichSchli/QuestionAnsweringGCN",
"id": "66772bdedc7d9fbc7df77aa48036f23a69b7b455",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/relation_prediction_oracle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "730851"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
} |
def numberChain(x):
    '''Yield the number chain starting at x, ending at 1 or 89.

    A number chain is created by continuously adding the square of the
    digits in a number to form a new number until it has been seen before.
    for example,
    44 -> 32 -> 13 -> 10 -> 1
    85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
    Therefore any chain that arrives at 1 or 89 will become stuck in an endless
    loop. What is most amazing is that EVERY starting number will eventually
    arrive at 1 or 89.

    The yielded chain includes the terminal value (1 or 89).  Fix: the
    original lagging-variable loop yielded nothing when x itself was 1 or
    89; now such a chain yields that single value, consistent with every
    other starting number.
    '''
    while True:
        yield x
        if x in (1, 89):
            return
        # Next element: sum of the squares of x's decimal digits.
        x = sum(int(digit) ** 2 for digit in str(x))
if __name__ == '__main__':
    # Demo: print the chain starting from 42.
    for i in numberChain(42):
        print(i)
| {
"content_hash": "6712cbdd8d7678ae40a55e6c216b4ace",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 27.52,
"alnum_prop": 0.5625,
"repo_name": "boarpig/math",
"id": "910aa09d52873a1a4dcec9f9f7cb3d4dc102029b",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numberchain.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12243"
}
],
"symlink_target": ""
} |
"""
Allows you to test unimplemented code in a development environment
by specifying a default argument as an argument
to the decorator (or you can leave it off to specify None to be returned.
"""
# Annotation wrapper annotation method
def unimplemented(defaultval):
    # Decorator factory (Python 2).  Used with an argument,
    # @unimplemented(value) replaces the function with a stub returning
    # `value`; applied bare (@unimplemented on a function), defaultval IS
    # the function, and the decorator acts as a pass-through.
    print type(unimplemented)
    print type(defaultval)
    # Function passed directly -> bare usage -> pass-through decorator.
    if(type(defaultval) == type(unimplemented)):
        def f(func):
            print func
            return func
        return f
        #return lambda : None
    else:
        # Actual annotation
        def unimp_wrapper(func):
            print func
            # What we replace the function with
            def wrapper(*arg):
                return defaultval
            return wrapper
        return unimp_wrapper
def implemented(func):
    # Pass-through decorator: prints the function object and returns it.
    print func
    return func
# funcc is replaced by a stub that always returns "implemented".
@unimplemented("implemented")
@implemented
def funcc(i):
    return i
# Passing a function makes @unimplemented act as a pass-through, so funcc2
# keeps its real implementation.
@unimplemented(unimplemented)
@implemented
def funcc2(i):
    return i
print funcc(2)
print funcc2(2)
| {
"content_hash": "3ccbb525f5282453df39cac8bcefd650",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 23.926829268292682,
"alnum_prop": 0.6503567787971458,
"repo_name": "t10471/python",
"id": "4203f6657b06790fd34dcef7c6d411b62ad2c64c",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practice/src/decorator/def/unimplemented_function_replacement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "592"
},
{
"name": "Python",
"bytes": "243645"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
"""
Django settings for www project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '0p8qrpm6kg65zx5ty0j_wmtl_y_$^80a@f&v%^mte*bq+md)fq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'architools.apps.ArchitoolsConfig',
    # django-allauth stack: social login via Google/GitHub/Facebook.
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'allauth.socialaccount.providers.github',
    'allauth.socialaccount.providers.facebook',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'www.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'www.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Required by django.contrib.sites (and hence allauth).
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "f07d978ae883d4875bf8799809472fbb",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 91,
"avg_line_length": 25.120805369127517,
"alnum_prop": 0.6927598183275447,
"repo_name": "gmoulard/OpenStack_Tenant_Doc",
"id": "b633fd8f8f7c83dcc620845dbba322903e1f8063",
"size": "3743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/www/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "50"
},
{
"name": "Python",
"bytes": "22"
}
],
"symlink_target": ""
} |
"""
Unit Tests for :py:class:`ironic.conductor.rpcapi.ConductorAPI`.
"""
import copy
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import _utils as messaging_utils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import manager as conductor_manager
from ironic.conductor import rpcapi as conductor_rpcapi
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils as dbutils
CONF = cfg.CONF
class ConductorRPCAPITestCase(tests_base.TestCase):
    def test_versions_in_sync(self):
        # Guards against bumping the manager's RPC version without updating
        # the client (or vice versa).
        self.assertEqual(
            conductor_manager.ConductorManager.RPC_API_VERSION,
            conductor_rpcapi.ConductorAPI.RPC_API_VERSION)
class RPCAPITestCase(base.DbTestCase):
    def setUp(self):
        """Build the fake node/portgroup fixtures shared by the tests."""
        super(RPCAPITestCase, self).setUp()
        self.fake_node = dbutils.get_test_node(driver='fake-driver')
        self.fake_node_obj = objects.Node._from_db_object(
            objects.Node(self.context), self.fake_node)
        self.fake_portgroup = dbutils.get_test_portgroup()
    def test_serialized_instance_has_uuid(self):
        self.assertIn('uuid', self.fake_node)
    # The tests below cover RPC topic resolution: a node's (or driver's)
    # topic is "<base topic>.<conductor host>", looked up fresh from the DB
    # on every call (no caching).
    def test_get_topic_for_known_driver(self):
        CONF.set_override('host', 'fake-host')
        self.dbapi.register_conductor({'hostname': 'fake-host',
                                       'drivers': ['fake-driver']})
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        expected_topic = 'fake-topic.fake-host'
        self.assertEqual(expected_topic,
                         rpcapi.get_topic_for(self.fake_node_obj))
    def test_get_topic_for_unknown_driver(self):
        # No conductor supports the node's driver -> NoValidHost.
        CONF.set_override('host', 'fake-host')
        self.dbapi.register_conductor({'hostname': 'fake-host',
                                       'drivers': ['other-driver']})
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertRaises(exception.NoValidHost,
                          rpcapi.get_topic_for,
                          self.fake_node_obj)
    def test_get_topic_doesnt_cache(self):
        # A failed lookup must not be cached: once a conductor registers,
        # the same rpcapi call succeeds.
        CONF.set_override('host', 'fake-host')
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertRaises(exception.NoValidHost,
                          rpcapi.get_topic_for,
                          self.fake_node_obj)
        self.dbapi.register_conductor({'hostname': 'fake-host',
                                       'drivers': ['fake-driver']})
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        expected_topic = 'fake-topic.fake-host'
        self.assertEqual(expected_topic,
                         rpcapi.get_topic_for(self.fake_node_obj))
    def test_get_topic_for_driver_known_driver(self):
        CONF.set_override('host', 'fake-host')
        self.dbapi.register_conductor({
            'hostname': 'fake-host',
            'drivers': ['fake-driver'],
        })
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertEqual('fake-topic.fake-host',
                         rpcapi.get_topic_for_driver('fake-driver'))
    def test_get_topic_for_driver_unknown_driver(self):
        CONF.set_override('host', 'fake-host')
        self.dbapi.register_conductor({
            'hostname': 'fake-host',
            'drivers': ['other-driver'],
        })
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertRaises(exception.DriverNotFound,
                          rpcapi.get_topic_for_driver,
                          'fake-driver')
    def test_get_topic_for_driver_doesnt_cache(self):
        # Same no-caching guarantee for driver-based topic lookup.
        CONF.set_override('host', 'fake-host')
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertRaises(exception.DriverNotFound,
                          rpcapi.get_topic_for_driver,
                          'fake-driver')
        self.dbapi.register_conductor({
            'hostname': 'fake-host',
            'drivers': ['fake-driver'],
        })
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        self.assertEqual('fake-topic.fake-host',
                         rpcapi.get_topic_for_driver('fake-driver'))
    def _test_rpcapi(self, method, rpc_method, **kwargs):
        """Assert that rpcapi.<method>(**kwargs) issues the expected RPC.

        Patches the messaging client so no real RPC happens, then checks the
        prepare() target (topic/version) and the call/cast arguments.
        """
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
        # 'call' RPCs return a value; 'cast' RPCs return None.
        expected_retval = 'hello world' if rpc_method == 'call' else None
        expected_topic = 'fake-topic'
        if 'host' in kwargs:
            expected_topic += ".%s" % kwargs['host']
        target = {
            "topic": expected_topic,
            "version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
        }
        # Remaining kwargs are exactly the RPC message payload.
        expected_msg = copy.deepcopy(kwargs)
        self.fake_args = None
        self.fake_kwargs = None
        def _fake_can_send_version_method(version):
            return messaging_utils.version_is_compatible(
                rpcapi.RPC_API_VERSION, version)
        def _fake_prepare_method(*args, **kwargs):
            # Every kwarg passed to prepare() must match the target above.
            for kwd in kwargs:
                self.assertEqual(kwargs[kwd], target[kwd])
            return rpcapi.client
        def _fake_rpc_method(*args, **kwargs):
            # Capture the actual call/cast arguments for later comparison.
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval
        with mock.patch.object(rpcapi.client,
                               "can_send_version") as mock_can_send_version:
            mock_can_send_version.side_effect = _fake_can_send_version_method
            with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
                mock_prepared.side_effect = _fake_prepare_method
                with mock.patch.object(rpcapi.client,
                                       rpc_method) as mock_method:
                    mock_method.side_effect = _fake_rpc_method
                    retval = getattr(rpcapi, method)(self.context, **kwargs)
                    self.assertEqual(retval, expected_retval)
                    expected_args = [self.context, method, expected_msg]
                    for arg, expected_arg in zip(self.fake_args,
                                                 expected_args):
                        self.assertEqual(arg, expected_arg)
    # Each test below pins one client method's RPC name, call type, pinned
    # version, and payload, delegating the checking to _test_rpcapi.
    def test_update_node(self):
        self._test_rpcapi('update_node',
                          'call',
                          version='1.1',
                          node_obj=self.fake_node)
    def test_change_node_power_state(self):
        self._test_rpcapi('change_node_power_state',
                          'call',
                          version='1.6',
                          node_id=self.fake_node['uuid'],
                          new_state=states.POWER_ON)
    def test_vendor_passthru(self):
        self._test_rpcapi('vendor_passthru',
                          'call',
                          version='1.20',
                          node_id=self.fake_node['uuid'],
                          driver_method='test-driver-method',
                          http_method='test-http-method',
                          info={"test_info": "test_value"})
    def test_driver_vendor_passthru(self):
        self._test_rpcapi('driver_vendor_passthru',
                          'call',
                          version='1.20',
                          driver_name='test-driver-name',
                          driver_method='test-driver-method',
                          http_method='test-http-method',
                          info={'test_key': 'test_value'})
    def test_do_node_deploy(self):
        self._test_rpcapi('do_node_deploy',
                          'call',
                          version='1.22',
                          node_id=self.fake_node['uuid'],
                          rebuild=False,
                          configdrive=None)
    def test_do_node_tear_down(self):
        self._test_rpcapi('do_node_tear_down',
                          'call',
                          version='1.6',
                          node_id=self.fake_node['uuid'])
    def test_validate_driver_interfaces(self):
        self._test_rpcapi('validate_driver_interfaces',
                          'call',
                          version='1.5',
                          node_id=self.fake_node['uuid'])
    def test_destroy_node(self):
        self._test_rpcapi('destroy_node',
                          'call',
                          version='1.9',
                          node_id=self.fake_node['uuid'])
    def test_get_console_information(self):
        self._test_rpcapi('get_console_information',
                          'call',
                          version='1.11',
                          node_id=self.fake_node['uuid'])
    def test_set_console_mode(self):
        self._test_rpcapi('set_console_mode',
                          'call',
                          version='1.11',
                          node_id=self.fake_node['uuid'],
                          enabled=True)
    def test_update_port(self):
        fake_port = dbutils.get_test_port()
        self._test_rpcapi('update_port',
                          'call',
                          version='1.13',
                          port_obj=fake_port)
    def test_get_driver_properties(self):
        self._test_rpcapi('get_driver_properties',
                          'call',
                          version='1.16',
                          driver_name='fake-driver')
    def test_set_boot_device(self):
        self._test_rpcapi('set_boot_device',
                          'call',
                          version='1.17',
                          node_id=self.fake_node['uuid'],
                          device=boot_devices.DISK,
                          persistent=False)
    def test_get_boot_device(self):
        self._test_rpcapi('get_boot_device',
                          'call',
                          version='1.17',
                          node_id=self.fake_node['uuid'])
def test_get_supported_boot_devices(self):
self._test_rpcapi('get_supported_boot_devices',
'call',
version='1.17',
node_id=self.fake_node['uuid'])
def test_get_node_vendor_passthru_methods(self):
self._test_rpcapi('get_node_vendor_passthru_methods',
'call',
version='1.21',
node_id=self.fake_node['uuid'])
def test_get_driver_vendor_passthru_methods(self):
self._test_rpcapi('get_driver_vendor_passthru_methods',
'call',
version='1.21',
driver_name='fake-driver')
def test_inspect_hardware(self):
self._test_rpcapi('inspect_hardware',
'call',
version='1.24',
node_id=self.fake_node['uuid'])
def test_continue_node_clean(self):
self._test_rpcapi('continue_node_clean',
'cast',
version='1.27',
node_id=self.fake_node['uuid'])
def test_get_raid_logical_disk_properties(self):
self._test_rpcapi('get_raid_logical_disk_properties',
'call',
version='1.30',
driver_name='fake-driver')
def test_set_target_raid_config(self):
self._test_rpcapi('set_target_raid_config',
'call',
version='1.30',
node_id=self.fake_node['uuid'],
target_raid_config='config')
def test_do_node_clean(self):
clean_steps = [{'step': 'upgrade_firmware', 'interface': 'deploy'},
{'step': 'upgrade_bmc', 'interface': 'management'}]
self._test_rpcapi('do_node_clean',
'call',
version='1.32',
node_id=self.fake_node['uuid'],
clean_steps=clean_steps)
def test_object_action(self):
self._test_rpcapi('object_action',
'call',
version='1.31',
objinst='fake-object',
objmethod='foo',
args=tuple(),
kwargs=dict())
def test_object_class_action_versions(self):
self._test_rpcapi('object_class_action_versions',
'call',
version='1.31',
objname='fake-object',
objmethod='foo',
object_versions={'fake-object': '1.0'},
args=tuple(),
kwargs=dict())
def test_object_backport_versions(self):
self._test_rpcapi('object_backport_versions',
'call',
version='1.31',
objinst='fake-object',
object_versions={'fake-object': '1.0'})
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_action_invalid_version(self, mock_send):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
mock_send.return_value = False
self.assertRaises(NotImplementedError,
rpcapi.object_action, self.context,
objinst='fake-object', objmethod='foo',
args=tuple(), kwargs=dict())
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_class_action_versions_invalid_version(self, mock_send):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
mock_send.return_value = False
self.assertRaises(NotImplementedError,
rpcapi.object_class_action_versions, self.context,
objname='fake-object', objmethod='foo',
object_versions={'fake-object': '1.0'},
args=tuple(), kwargs=dict())
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_backport_versions_invalid_version(self, mock_send):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
mock_send.return_value = False
self.assertRaises(NotImplementedError,
rpcapi.object_backport_versions, self.context,
objinst='fake-object',
object_versions={'fake-object': '1.0'})
def test_update_portgroup(self):
self._test_rpcapi('update_portgroup',
'call',
version='1.33',
portgroup_obj=self.fake_portgroup)
def test_destroy_portgroup(self):
self._test_rpcapi('destroy_portgroup',
'call',
version='1.33',
portgroup=self.fake_portgroup)
def test_heartbeat(self):
self._test_rpcapi('heartbeat',
'call',
node_id='fake-node',
callback_url='http://ramdisk.url:port',
version='1.34')
| {
"content_hash": "2310c62f0280bafb2769a1d4895d2638",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 78,
"avg_line_length": 40.1839378238342,
"alnum_prop": 0.5072529172845077,
"repo_name": "bacaldwell/ironic",
"id": "99f57f3643f1c4c3470c66d4c68f8bb249e131c6",
"size": "16183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/conductor/test_rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "4207766"
},
{
"name": "Shell",
"bytes": "69242"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import hashlib
import hmac
import json
import base64
import time
import boto3
# Add the lib directory to the path for Lambda to load our libs
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
from requests import Session, HTTPError # NOQA
from requests.packages.urllib3.util.retry import Retry # NOQA
from requests.adapters import HTTPAdapter # NOQA
class StaticRetry(Retry):
    # Override urllib3's exponential-backoff sleep with a fixed
    # 3-second pause between retry attempts.
    def sleep(self):
        time.sleep(3)
def verify_signature(secret, signature, payload):
    """Constant-time check of a GitHub-style 'sha1=&lt;hex&gt;' HMAC signature.

    :param secret: shared webhook secret (text).
    :param signature: the X-Hub-Signature header value to verify.
    :param payload: raw request body (bytes).
    :returns: True when the signature matches, False otherwise.
    """
    digest = hmac.new(secret.encode('ascii'), payload, hashlib.sha1).hexdigest()
    expected = 'sha1=' + digest
    # compare_digest avoids leaking information through timing.
    return hmac.compare_digest(expected.encode('ascii'),
                               signature.encode('ascii'))
def relay_github(event, requests_session):
    """Forward a GitHub webhook to Jenkins after verifying its signature.

    Raises HTTPError when the HMAC signature does not match or when
    Jenkins answers with an error status.
    """
    is_valid = verify_signature(event['secret'],
                                event['x_hub_signature'],
                                event['payload'])
    print('Signature verified: {}'.format(is_valid))
    if not is_valid:
        raise HTTPError('400 Client Error: Bad Request')
    headers = {
        'Content-Type': 'application/json',
        'X-GitHub-Delivery': event['x_github_delivery'],
        'X-GitHub-Event': event['x_github_event'],
        'X-Hub-Signature': event['x_hub_signature'],
    }
    response = requests_session.post(event['jenkins_url'],
                                     headers=headers,
                                     data=event['payload'])
    response.raise_for_status()
def relay_quay(event, requests_session):
    """Forward a Quay webhook payload to Jenkins unmodified."""
    response = requests_session.post(
        event['jenkins_url'],
        headers={'Content-Type': 'application/json'},
        data=event['payload'])
    response.raise_for_status()
def relay_sqs(event):
    """Queue the webhook on SQS for asynchronous delivery to Jenkins.

    :param event: Lambda event dict; requires 'sqs_queue', optionally
        'sqs_region' (default us-west-2), plus the GitHub header fields
        and the already-decoded 'payload'.
    """
    sqs_queue = event.get('sqs_queue')
    sqs_region = event.get('sqs_region', 'us-west-2')
    assert sqs_queue
    payload = event['payload']
    # The payload was base64-decoded to bytes by lambda_handler; bytes are
    # not JSON-serializable on Python 3, so decode to text first.
    if isinstance(payload, bytes):
        payload = payload.decode('utf-8')
    sqs_obj = dict(
        timestamp=int(time.time()),
        jenkins_url=event.get('jenkins_url'),
        headers={
            'Content-Type': 'application/json',
            'X-GitHub-Delivery': event['x_github_delivery'],
            'X-GitHub-Event': event['x_github_event'],
            'X-Hub-Signature': event['x_hub_signature']
        },
        data=payload,
    )
    sqs = boto3.client('sqs', sqs_region)
    queue_url = sqs.get_queue_url(QueueName=sqs_queue)['QueueUrl']
    # Bug fix: json.dumps already returns text; the previous
    # ``json.dumps(...).decode()`` raised AttributeError on Python 3.
    sqs.send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps(sqs_obj),
    )
def lambda_handler(event, context):
    """Lambda entry point: decode the payload and relay it to one backend.

    Exactly one relay is chosen based on event['service']: 'quay', 'sqs',
    or GitHub by default.
    """
    print('Webhook received')
    event['payload'] = base64.b64decode(event['payload'])
    requests_session = Session()
    retries = StaticRetry(total=40)
    requests_session.mount(event['jenkins_url'], HTTPAdapter(max_retries=retries))
    service = event.get('service')
    # Bug fix: the original used two independent ``if`` statements, so a
    # 'quay' event fell through to the trailing ``else`` and was relayed
    # to GitHub as well.  Use a single if/elif/else chain.
    if service == 'quay':
        relay_quay(event, requests_session)
    elif service == 'sqs':
        relay_sqs(event)
    else:
        relay_github(event, requests_session)
    print('Successfully relayed payload')
if __name__ == '__main__':
    # No CLI behaviour: this module is invoked by AWS Lambda via
    # lambda_handler().
    pass
| {
"content_hash": "afdd16e9d37887b8b610ab714dece064",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 93,
"avg_line_length": 33.663366336633665,
"alnum_prop": 0.5620588235294117,
"repo_name": "pristineio/lambda-webhook",
"id": "49d4019d9f7d76e2d0f16a2040e5e3e9a8bf6e58",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambdawebhook/hook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10676"
}
],
"symlink_target": ""
} |
import mock
import unittest
from cloudbaseinit.plugins.windows import userdata_plugins
class MultipartUserDataPluginTest(unittest.TestCase):
    """Tests for the user-data PluginSet loader."""

    def setUp(self):
        self._userdata = userdata_plugins.PluginSet('fake path')

    @mock.patch('glob.glob')
    @mock.patch('cloudbaseinit.plugins.windows.userdata_plugins.'
                'load_from_file')
    def test_load(self, mock_load_from_file, mock_glob):
        """load() scans path/*.py and registers each plugin by its type."""
        plugin = mock.MagicMock()
        mock_glob.return_value = ['fake_file.py']
        mock_load_from_file.return_value = plugin
        self._userdata.load()
        mock_glob.assert_called_once_with(self._userdata.path + '/*.py')
        mock_load_from_file.assert_called_once_with('fake_file.py',
                                                    self._userdata)
        self.assertEqual(self._userdata.set[plugin.type], plugin)
| {
"content_hash": "cec0467b579da6afc31af51098712e8e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 36.15384615384615,
"alnum_prop": 0.6340425531914894,
"repo_name": "telerik/cloudbase-init",
"id": "fe0b9cba888f3ef11336c7294aae2c038a6cd1f1",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudbaseinit/tests/plugins/windows/test_userdata_plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "684477"
}
],
"symlink_target": ""
} |
## Raw FASTQ input directory
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/rna-seq/Penalva_L_08182016/mouse'
## Output directory
OUT_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/RNA-Seq_mouse'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/mm10/fasta/mm10.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/mm10/fasta/mm10.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/mm10/star_annotated'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.annotation.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment i.e stranded/non-stranded
## and hence is not required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.gffutils.cds.bed'
## UTR bed paths (kept so the pipeline rules resolve even if unused here)
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.gffutils.UTR5.bed'
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/mm10/annotation/gencode.vM11.gffutils.UTR3.bed'
## Name of python2 environment
## The following package needs to be installed in that environment
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 PYTHON=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
## Counting parameters for HTSeq / featureCounts (stranded protocol)
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
| {
"content_hash": "91be911f4f42c531d8328e1495cffdc6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 145,
"avg_line_length": 35.859375,
"alnum_prop": 0.738562091503268,
"repo_name": "saketkc/ribo-seq-snakemake",
"id": "3eb5cc3328bea5ded0ea12f76b0eff6834c67b0a",
"size": "2342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/Oct_10_2016_HuR_Mouse_rna.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57714"
},
{
"name": "R",
"bytes": "3539"
},
{
"name": "Shell",
"bytes": "8205"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from natural_bm.preprocessing import make_PCA_matrix, make_ZCA_matrix
import natural_bm.backend.theano_backend as BTH
import natural_bm.backend.numpy_backend as BNP
#%% Test prep for tests
def _diag_non_diag(x):
diag = np.copy(np.diag(x))
index = np.where(~np.eye(x.shape[0], dtype=bool))
non_diag = x[index]
return diag, non_diag
def setup_data():
    """Return (data, cov): a random 10x10 normal sample and its covariance."""
    size = 10
    sample = np.random.normal(size=(size, size))
    return sample, np.cov(sample.T)
def setup_datatype(B, data, cov):
    """Wrap data and cov in backend B's variable type."""
    return B.variable(data), B.variable(cov)
def setup_white(whitetype, cov, eps):
    """Build a whitening matrix of the requested kind ('PCA' or 'ZCA')."""
    if whitetype == 'PCA':
        return make_PCA_matrix(cov, eps)
    if whitetype == 'ZCA':
        return make_ZCA_matrix(cov, eps)
    raise NotImplementedError
def verify(whitetype, eps, cov, new_cov):
    """Assert that new_cov shows the structure expected after whitening."""
    diag, off_diag = _diag_non_diag(new_cov)
    if whitetype == 'PCA':
        atol = 2e-2
        # All off-diagonal covariance should vanish under PCA whitening.
        assert np.allclose(off_diag, 0.0, atol=atol)
        if eps == 1e-2:
            # First variance is one; middle ones stay strictly positive.
            assert np.isclose(diag[0], 1.0, atol=atol)
            assert np.all(diag[1:-1] > 0.0)
        elif eps == 1e-5:
            # Everything but the last variance should be one.
            assert np.allclose(diag[:-1], 1.0, atol=atol)
        else:
            raise NotImplementedError
    elif whitetype == 'ZCA':
        old_diag, old_off_diag = _diag_non_diag(cov)
        # Variances are normalized into [0, 1].
        assert np.max(diag) <= 1.0
        assert np.min(diag) >= 0.0
        # Statistical check: correlations shrink noticeably.
        assert np.std(off_diag) < 0.75 * np.std(old_off_diag)
    else:
        raise NotImplementedError
#%%
@pytest.mark.parametrize('whitetype', ['PCA', 'ZCA'], ids=['PCA', 'ZCA'])
@pytest.mark.parametrize('B', [BTH, BNP], ids=["BTH", "BNP"])
@pytest.mark.parametrize('eps', [1e-2, 1e-5], ids=['1e-2', '1e-5'])
def test_white(whitetype, B, eps):
    """Whitened data must show the covariance structure verify() expects."""
    raw, raw_cov = setup_data()
    data, cov = setup_datatype(B, raw, raw_cov)
    whitener = setup_white(whitetype, cov, eps)
    whitened = data.dot(whitener)
    if B == BTH:
        # Theano backend returns symbolic objects; realize them first.
        cov = B.get_value(cov)
        whitened = B.eval(whitened)
    verify(whitetype, eps, cov, np.cov(whitened.T))
#%% Main
if __name__ == '__main__':
    # Allow running this test module directly without the pytest CLI.
    pytest.main([__file__])
| {
"content_hash": "fae319c550b63d0881dc6c27e1fc2438",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 73,
"avg_line_length": 25.98,
"alnum_prop": 0.596612779060816,
"repo_name": "alexhunterlang/natural_bm",
"id": "1a67e7fcbbb934434c0f0bb5ba13874a24c3b36a",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/natural_bm/test_preprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "215690"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.lookups.v2.phone_number import PhoneNumberList
class V2(Version):
    """Version 2 of the Twilio Lookups API."""

    def __init__(self, domain):
        """Initialize the V2 version of Lookups.

        :returns: V2 version of Lookups
        :rtype: twilio.rest.lookups.v2.V2.V2
        """
        super(V2, self).__init__(domain)
        self.version = 'v2'
        # Lazily-built PhoneNumberList cache.
        self._phone_numbers = None

    @property
    def phone_numbers(self):
        """Return the PhoneNumberList, creating it on first access.

        :rtype: twilio.rest.lookups.v2.phone_number.PhoneNumberList
        """
        if self._phone_numbers is None:
            self._phone_numbers = PhoneNumberList(self)
        return self._phone_numbers

    def __repr__(self):
        """Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Lookups.V2>'
| {
"content_hash": "3a4c7bac418aba95a44128c8d9ff0cad",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 67,
"avg_line_length": 24.121951219512194,
"alnum_prop": 0.5551061678463094,
"repo_name": "twilio/twilio-python",
"id": "c09cb2a4b22965a2cb18204ce5879350da031733",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/lookups/v2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
""" Time taking.
Mostly for measurements of Nuitka of itself, e.g. how long did it take to
call an external tool.
"""
from timeit import default_timer as timer
from nuitka.Tracing import general
class StopWatch(object):
    """Measure elapsed wall-clock time between start() and end()."""

    __slots__ = ("start_time", "end_time")

    def __init__(self):
        self.start_time = None
        self.end_time = None

    def start(self):
        """Record the starting instant."""
        self.start_time = timer()

    def restart(self):
        """Begin timing anew; synonym of start()."""
        self.start()

    def end(self):
        """Record the finishing instant."""
        self.end_time = timer()

    # Keep "stop" as a synonym of "end" so either name works.
    stop = end

    def getDelta(self):
        """Elapsed seconds; uses the current time while still running."""
        if self.end_time is None:
            return timer() - self.start_time
        return self.end_time - self.start_time
class TimerReport(object):
    """Context manager that logs how long its body took to execute.

    Mostly intended as a wrapper for external process calls.
    """

    __slots__ = ("message", "decider", "logger", "timer", "min_report_time")

    def __init__(self, message, logger=None, decider=True, min_report_time=None):
        if logger is None:
            logger = general
        if decider is True:
            # Default decider: always report.
            decider = lambda: True
        self.message = message
        self.logger = logger
        self.decider = decider
        self.min_report_time = min_report_time
        self.timer = None

    def getTimer(self):
        return self.timer

    def __enter__(self):
        self.timer = StopWatch()
        self.timer.start()

    def __exit__(self, exception_type, exception_value, exception_tb):
        self.timer.end()
        delta_time = self.timer.getDelta()
        # Only report when the elapsed time crosses the optional threshold.
        took_long_enough = (
            self.min_report_time is None or delta_time >= self.min_report_time
        )
        if exception_type is None and took_long_enough and self.decider():
            self.logger.info(self.message % self.timer.getDelta())
| {
"content_hash": "9d7915a39a2187e8d193657a31ca7bd2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.597547974413646,
"repo_name": "kayhayen/Nuitka",
"id": "d9bc198d8b9439c88cee1701ed2886c3e3ebb1d7",
"size": "2656",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nuitka/utils/Timing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1868"
},
{
"name": "C",
"bytes": "617681"
},
{
"name": "C++",
"bytes": "149777"
},
{
"name": "Python",
"bytes": "6603718"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
} |
"""The Risco integration."""
import asyncio
from datetime import timedelta
import logging
from pyrisco import CannotConnectError, OperationError, RiscoAPI, UnauthorizedError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_PASSWORD,
CONF_PIN,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DATA_COORDINATOR, DEFAULT_SCAN_INTERVAL, DOMAIN, EVENTS_COORDINATOR
PLATFORMS = ["alarm_control_panel", "binary_sensor", "sensor"]
UNDO_UPDATE_LISTENER = "undo_update_listener"
LAST_EVENT_STORAGE_VERSION = 1
LAST_EVENT_TIMESTAMP_KEY = "last_event_timestamp"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Risco component."""
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Risco from a config entry.

    Logs into the Risco cloud, primes the state coordinator, registers the
    options listener, then forwards platform setup as a background task.
    """
    data = entry.data
    risco = RiscoAPI(data[CONF_USERNAME], data[CONF_PASSWORD], data[CONF_PIN])
    try:
        await risco.login(async_get_clientsession(hass))
    except CannotConnectError as error:
        # Transient failure: HA will retry the setup later.
        raise ConfigEntryNotReady() from error
    except UnauthorizedError:
        # Bad credentials: abort setup permanently (until reconfigured).
        _LOGGER.exception("Failed to login to Risco cloud")
        return False
    scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    coordinator = RiscoDataUpdateCoordinator(hass, risco, scan_interval)
    # First refresh must succeed before platforms are set up.
    await coordinator.async_config_entry_first_refresh()
    # Events are polled on a fixed 60-second cadence.
    events_coordinator = RiscoEventsDataUpdateCoordinator(
        hass, risco, entry.entry_id, 60
    )
    undo_listener = entry.add_update_listener(_update_listener)
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENER: undo_listener,
        EVENTS_COORDINATOR: events_coordinator,
    }
    async def start_platforms():
        # Set up all platforms concurrently, then do the first events poll
        # (event entities must exist before events are dispatched).
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_setup(entry, platform)
                for platform in PLATFORMS
            ]
        )
        await events_coordinator.async_refresh()
    hass.async_create_task(start_platforms())
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)
    if unload_ok:
        # Drop the options listener and this entry's stored data.
        hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    # Options (e.g. scan interval) only take effect after a full reload.
    await hass.config_entries.async_reload(entry.entry_id)
class RiscoDataUpdateCoordinator(DataUpdateCoordinator):
    """Coordinator that polls the Risco cloud for alarm state."""

    def __init__(self, hass, risco, scan_interval):
        """Initialize global risco data updater."""
        self.risco = risco
        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=timedelta(seconds=scan_interval),
        )

    async def _async_update_data(self):
        """Fetch data from risco."""
        try:
            return await self.risco.get_state()
        except (CannotConnectError, UnauthorizedError, OperationError) as error:
            # Surface API failures through the coordinator's error channel.
            raise UpdateFailed(error) from error
class RiscoEventsDataUpdateCoordinator(DataUpdateCoordinator):
    """Coordinator that polls the Risco cloud for new events."""

    def __init__(self, hass, risco, eid, scan_interval):
        """Initialize global risco data updater."""
        self.risco = risco
        # Persist the newest event timestamp so polling resumes where it
        # left off across restarts.
        self._store = Store(
            hass, LAST_EVENT_STORAGE_VERSION, f"risco_{eid}_last_event_timestamp"
        )
        super().__init__(
            hass,
            _LOGGER,
            name=f"{DOMAIN}_events",
            update_interval=timedelta(seconds=scan_interval),
        )

    async def _async_update_data(self):
        """Fetch data from risco."""
        stored = await self._store.async_load() or {}
        since = stored.get(LAST_EVENT_TIMESTAMP_KEY, "2020-01-01T00:00:00Z")
        try:
            events = await self.risco.get_events(since, 10)
        except (CannotConnectError, UnauthorizedError, OperationError) as error:
            raise UpdateFailed(error) from error
        if events:
            # Newest event comes first; remember its timestamp.
            await self._store.async_save(
                {LAST_EVENT_TIMESTAMP_KEY: events[0].time})
        return events
| {
"content_hash": "1279f3c9e12fa7b2e76456c912e459b7",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 88,
"avg_line_length": 32.73202614379085,
"alnum_prop": 0.6615415335463258,
"repo_name": "adrienbrault/home-assistant",
"id": "eec30553870f93c2438e027ac20e7b63755bad35",
"size": "5008",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/risco/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import os
from awscli.customizations.commands import BasicCommand
from awscli.customizations.configure.writer import ConfigFileWriter
from . import PREDEFINED_SECTION_NAMES
class ConfigureSetCommand(BasicCommand):
    """Implements ``aws configure set varname value``.

    Writes a single configuration value to ~/.aws/config, or to
    ~/.aws/credentials for credential keys.
    """
    NAME = 'set'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
                                         '_description.rst')
    SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
    ARG_TABLE = [
        {'name': 'varname',
         'help_text': 'The name of the config value to set.',
         'action': 'store',
         'cli_type_name': 'string', 'positional_arg': True},
        {'name': 'value',
         'help_text': 'The value to set.',
         'action': 'store',
         'no_paramfile': True, # To disable the default paramfile behavior
         'cli_type_name': 'string', 'positional_arg': True},
    ]
    # Any variables specified in this list will be written to
    # the ~/.aws/credentials file instead of ~/.aws/config.
    _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',
                            'aws_session_token']
    def __init__(self, session, config_writer=None):
        """:param config_writer: injectable for testing; defaults to a
        real ConfigFileWriter."""
        super(ConfigureSetCommand, self).__init__(session)
        if config_writer is None:
            config_writer = ConfigFileWriter()
        self._config_writer = config_writer
    def _run_main(self, args, parsed_globals):
        """Resolve section/key/value from the dotted varname and write it."""
        varname = args.varname
        value = args.value
        section = 'default'
        # Before handing things off to the config writer,
        # we need to find out three things:
        # 1. What section we're writing to (section).
        # 2. The name of the config key (varname)
        # 3. The actual value (value).
        if '.' not in varname:
            # unqualified name, scope it to the current
            # profile (or leave it as the 'default' section if
            # no profile is set).
            if self._session.profile is not None:
                section = 'profile %s' % self._session.profile
        else:
            # First figure out if it's been scoped to a profile.
            parts = varname.split('.')
            if parts[0] in ('default', 'profile'):
                # Then we know we're scoped to a profile.
                if parts[0] == 'default':
                    section = 'default'
                    remaining = parts[1:]
                else:
                    # [profile, profile_name, ...]
                    section = "profile %s" % parts[1]
                    remaining = parts[2:]
                varname = remaining[0]
                if len(remaining) == 2:
                    # Nested key, e.g. "profile dev.s3.signature_version":
                    # store as a sub-dict under the key.
                    value = {remaining[1]: value}
            elif parts[0] not in PREDEFINED_SECTION_NAMES:
                if self._session.profile is not None:
                    section = 'profile %s' % self._session.profile
                else:
                    profile_name = self._session.get_config_variable('profile')
                    if profile_name is not None:
                        section = profile_name
                varname = parts[0]
                if len(parts) == 2:
                    value = {parts[1]: value}
            elif len(parts) == 2:
                # Otherwise it's something like "set preview.service true"
                # or something in the [plugin] section.
                section, varname = parts
        config_filename = os.path.expanduser(
            self._session.get_config_variable('config_file'))
        updated_config = {'__section__': section, varname: value}
        if varname in self._WRITE_TO_CREDS_FILE:
            # Credential keys go to the credentials file, where profile
            # sections are written without the "profile " prefix.
            config_filename = os.path.expanduser(
                self._session.get_config_variable('credentials_file'))
            section_name = updated_config['__section__']
            if section_name.startswith('profile '):
                updated_config['__section__'] = section_name[8:]
        self._config_writer.update_config(updated_config, config_filename)
| {
"content_hash": "679612e4d8a515be0164aaba4712c335",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 44.94444444444444,
"alnum_prop": 0.5500618046971569,
"repo_name": "mnahm5/django-estore",
"id": "35300d8c3c12b0fe7471ab6fcea14cc592aa9667",
"size": "4610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/site-packages/awscli/customizations/configure/set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "2695"
},
{
"name": "C",
"bytes": "460931"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "144496"
},
{
"name": "HTML",
"bytes": "155544"
},
{
"name": "JavaScript",
"bytes": "206799"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "24837167"
},
{
"name": "Shell",
"bytes": "4408"
},
{
"name": "Tcl",
"bytes": "1237789"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
import os
import re
import socket
import struct
from email.utils import parseaddr
from functools import wraps
import idna
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.urls import reverse
from django.utils.translation import ugettext as _
from modoboa.admin import models as admin_models
from modoboa.lib.email_utils import (
split_address, split_local_part, split_mailbox
)
from modoboa.lib.exceptions import InternalError
from modoboa.lib.sysutils import exec_cmd
from modoboa.lib.web_utils import NavigationParameters
from modoboa.parameters import tools as param_tools
from .models import Policy, Users
from .utils import smart_bytes, smart_text
def selfservice(ssfunc=None):
    """Route a view between authenticated and 'self-service' access.

    The 'self-service' feature allows users to act on quarantined
    messages without being authenticated.  When the request carries no
    ``secret_id`` and the user is logged in, the decorated view runs;
    otherwise *ssfunc* handles the request, provided the feature is
    enabled (else the user is sent to the login page).

    :param ssfunc: the function to call if the 'self-service'
                   pre-requisites are satisfied
    """
    def decorator(func):
        @wraps(func)
        def inner(request, *args, **kwargs):
            has_secret = bool(request.GET.get("secret_id"))
            if not has_secret and request.user.is_authenticated:
                return func(request, *args, **kwargs)
            if not param_tools.get_global_parameter("self_service"):
                return redirect_to_login(reverse("modoboa_amavis:index"))
            return ssfunc(request, *args, **kwargs)
        return inner
    return decorator
class AMrelease(object):
    """Client for the amavis AM.PDP socket, used to release messages."""

    def __init__(self):
        # Connect over TCP ("inet" mode) or a unix socket, per settings.
        conf = dict(param_tools.get_global_parameters("modoboa_amavis"))
        try:
            if conf["am_pdp_mode"] == "inet":
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.connect((conf["am_pdp_host"], conf["am_pdp_port"]))
            else:
                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.sock.connect(conf["am_pdp_socket"])
        except socket.error as err:
            raise InternalError(
                _("Connection to amavis failed: %s" % str(err))
            )
    def decode(self, answer):
        # AM.PDP answers are %XX percent-encoded; decode each escape to
        # its raw byte.
        def repl(match):
            return struct.pack("B", int(match.group(0)[1:], 16))
        return re.sub(br"%([0-9a-fA-F]{2})", repl, answer)
    def __del__(self):
        self.sock.close()
    def sendreq(self, mailid, secretid, recipient, *others):
        """Send a release request; return True on a '250 ... Ok' answer."""
        # NOTE: the request body below is protocol text; its exact layout
        # (one attribute per line) is significant.
        self.sock.send(
            smart_bytes("""request=release
mail_id=%s
secret_id=%s
quar_type=Q
recipient=%s
""" % (smart_text(mailid), smart_text(secretid), smart_text(recipient))))
        answer = self.sock.recv(1024)
        answer = self.decode(answer)
        if re.search(br"250 [\d\.]+ Ok", answer):
            return True
        return False
class SpamassassinClient(object):
    """A stupid spamassassin client.

    Wraps either the local ``sa-learn`` binary or the remote ``spamc``
    client (depending on the ``sa_is_local`` setting) to feed ham/spam
    samples into SpamAssassin's bayes database.
    """

    def __init__(self, user, recipient_db):
        """Constructor.

        :param user: account requesting the learning operation
        :param str recipient_db: bayes database granularity ("global",
            "domain" or per-user)
        """
        conf = dict(param_tools.get_global_parameters("modoboa_amavis"))
        self._sa_is_local = conf["sa_is_local"]
        self._default_username = conf["default_user"]
        self._recipient_db = recipient_db
        self._setup_cache = {}
        self._username_cache = []
        # BUGFIX: ``_username`` used to be assigned only in the SimpleUsers
        # branch, leaving the attribute undefined for every other role and
        # crashing ``_learn`` with an AttributeError.
        self._username = None
        if user.role == "SimpleUsers" and conf["user_level_learning"]:
            self._username = user.email
        self.error = None
        if self._sa_is_local:
            # Local learning through sa-learn; exit code 0 means success.
            self._learn_cmd = self._find_binary("sa-learn")
            self._learn_cmd += " --{0} --no-sync -u {1}"
            self._learn_cmd_kwargs = {}
            self._expected_exit_codes = [0]
            self._sync_cmd = self._find_binary("sa-learn")
            self._sync_cmd += " -u {0} --sync"
        else:
            # Remote learning through spamc; exit codes 5/6 mean the
            # message was learned / was already known.
            self._learn_cmd = self._find_binary("spamc")
            self._learn_cmd += " -d {0} -p {1}".format(
                conf["spamd_address"], conf["spamd_port"]
            )
            self._learn_cmd += " -L {0} -u {1}"
            self._learn_cmd_kwargs = {}
            self._expected_exit_codes = [5, 6]

    def _find_binary(self, name):
        """Find path to binary.

        Tries ``which`` first, then every directory of the optional
        ``SA_LOOKUP_PATH`` setting.

        :raises InternalError: when the binary cannot be located
        """
        code, output = exec_cmd("which {}".format(name))
        if not code:
            return smart_text(output).strip()
        known_paths = getattr(settings, "SA_LOOKUP_PATH", ("/usr/bin", ))
        for path in known_paths:
            bpath = os.path.join(path, name)
            if os.path.isfile(bpath) and os.access(bpath, os.X_OK):
                return bpath
        raise InternalError(_("Failed to find {} binary").format(name))

    def _get_mailbox_from_rcpt(self, rcpt):
        """Retrieve a mailbox from a recipient address.

        Falls back to alias resolution when no mailbox matches; returns
        ``None`` for aliases that are not of type "alias".

        :raises InternalError: when neither a mailbox nor an alias matches
        """
        local_part, domname, extension = (
            split_mailbox(rcpt, return_extension=True))
        try:
            mailbox = admin_models.Mailbox.objects.select_related(
                "domain").get(address=local_part, domain__name=domname)
        except admin_models.Mailbox.DoesNotExist:
            alias = admin_models.Alias.objects.filter(
                address="{}@{}".format(local_part, domname),
                aliasrecipient__r_mailbox__isnull=False).first()
            if not alias:
                raise InternalError(_("No recipient found"))
            if alias.type != "alias":
                return None
            mailbox = alias.aliasrecipient_set.filter(
                r_mailbox__isnull=False).first()
        return mailbox

    def _get_domain_from_rcpt(self, rcpt):
        """Retrieve a domain from a recipient address.

        :raises InternalError: when the domain is not hosted locally
        """
        local_part, domname = split_mailbox(rcpt)
        domain = admin_models.Domain.objects.filter(name=domname).first()
        if not domain:
            raise InternalError(_("Local domain not found"))
        return domain

    def _learn(self, rcpt, msg, mtype):
        """Internal method to call the learning command.

        Resolves the SA username from the recipient database granularity,
        lazily sets up manual learning for it, then pipes the message to
        sa-learn/spamc.

        :param str rcpt: recipient address
        :param msg: raw message content
        :param str mtype: "spam" or "ham"
        :return: True on success; on failure ``self.error`` holds the output
        """
        if self._username is None:
            if self._recipient_db == "global":
                username = self._default_username
            elif self._recipient_db == "domain":
                domain = self._get_domain_from_rcpt(rcpt)
                username = domain.name
                condition = (
                    username not in self._setup_cache and
                    setup_manual_learning_for_domain(domain))
                if condition:
                    self._setup_cache[username] = True
            else:
                # Per-user database: learn for the recipient's mailbox.
                mbox = self._get_mailbox_from_rcpt(rcpt)
                if mbox is None:
                    username = self._default_username
                else:
                    if isinstance(mbox, admin_models.Mailbox):
                        username = mbox.full_address
                    elif isinstance(mbox, admin_models.AliasRecipient):
                        username = mbox.address
                    else:
                        username = None
                    condition = (
                        username is not None and
                        username not in self._setup_cache and
                        setup_manual_learning_for_mbox(mbox))
                    if condition:
                        self._setup_cache[username] = True
        else:
            username = self._username
            if username not in self._setup_cache:
                mbox = self._get_mailbox_from_rcpt(username)
                if mbox and setup_manual_learning_for_mbox(mbox):
                    self._setup_cache[username] = True
        if username not in self._username_cache:
            # Remember the username so done() can sync its bayes journal.
            self._username_cache.append(username)
        cmd = self._learn_cmd.format(mtype, username)
        code, output = exec_cmd(
            cmd, pinput=smart_bytes(msg), **self._learn_cmd_kwargs)
        if code in self._expected_exit_codes:
            return True
        self.error = smart_text(output)
        return False

    def learn_spam(self, rcpt, msg):
        """Learn new spam."""
        return self._learn(rcpt, msg, "spam")

    def learn_ham(self, rcpt, msg):
        """Learn new ham."""
        return self._learn(rcpt, msg, "ham")

    def done(self):
        """Call this method at the end of the processing.

        Syncs the bayes journal for every username we learned for (only
        needed when sa-learn runs locally with --no-sync).
        """
        if self._sa_is_local:
            for username in self._username_cache:
                cmd = self._sync_cmd.format(username)
                exec_cmd(cmd, **self._learn_cmd_kwargs)
class QuarantineNavigationParameters(NavigationParameters):
    """
    Specific NavigationParameters subclass for the quarantine.
    """

    def __init__(self, request):
        super(QuarantineNavigationParameters, self).__init__(
            request, "quarantine_navparams"
        )
        self.parameters += [
            ("pattern", "", False),
            ("criteria", "from_addr", False),
            ("msgtype", None, False),
            ("viewrequests", None, False)
        ]

    def _store_page(self):
        """Specific method to store the current page."""
        if self.request.GET.get("reset_page", None) or "page" not in self:
            self["page"] = 1
            return
        requested_page = self.request.GET.get("page", None)
        if requested_page is not None:
            self["page"] = int(requested_page)

    def back_to_listing(self):
        """Return the current listing URL.

        Looks into the user's session and the current request to build
        the URL.

        :return: a string
        """
        navparams = self.request.session[self.sessionkey]
        params = []
        if "page" in navparams:
            params.append("page=%s" % navparams["page"])
        if "order" in navparams:
            params.append("sort_order=%s" % navparams["order"])
        for pname, _default, _flag in self.parameters:
            if pname in navparams:
                params.append("%s=%s" % (pname, navparams[pname]))
        url = "listing"
        if params:
            url += "?%s" % ("&".join(params))
        return url
def create_user_and_policy(name, priority=7):
    """Create a policy and its matching *users* record.

    Both records are keyed on ``name``; policy names are truncated to the
    32 characters the amavis schema allows. When the user already exists,
    the matching policy is simply returned.

    :param str name: identifier used for both records
    :param int priority: priority assigned to the users record
    :return: the ``Policy`` object (existing or newly created)
    """
    user_exists = Users.objects.filter(email=name).exists()
    if user_exists:
        return Policy.objects.get(policy_name=name[:32])
    new_policy = Policy.objects.create(policy_name=name[:32])
    Users.objects.create(
        email=name, fullname=name, priority=priority, policy=new_policy
    )
    return new_policy
def create_user_and_use_policy(name, policy, priority=7):
    """Create a *users* record bound to an existing policy.

    :param str name: user record name
    :param policy: a ``Policy`` instance or a policy name (str)
    :param int priority: priority assigned to the users record
    """
    resolved_policy = (
        Policy.objects.get(policy_name=policy[:32])
        if isinstance(policy, str) else policy
    )
    Users.objects.get_or_create(
        email=name, fullname=name, priority=priority, policy=resolved_policy
    )
def update_user_and_policy(oldname, newname):
    """Rename a *users* record and its associated policy.

    No-op when both names are identical.

    :param str oldname: current name
    :param str newname: replacement name
    """
    if oldname == newname:
        return
    user = Users.objects.get(email=oldname)
    user.email = newname
    user.fullname = newname
    # Policy names are capped at 32 characters by the amavis schema.
    user.policy.policy_name = newname[:32]
    user.policy.save(update_fields=["policy_name"])
    user.save()
def delete_user_and_policy(name):
    """Remove a *users* record together with its policy.

    Silently returns when no record matches ``name``.

    :param str name: identifier
    """
    try:
        user = Users.objects.get(email=name)
    except Users.DoesNotExist:
        return
    user.policy.delete()
    user.delete()
def delete_user(name):
    """Remove a *users* record, ignoring unknown names.

    :param str name: user record name
    """
    try:
        user = Users.objects.get(email=name)
    except Users.DoesNotExist:
        return
    user.delete()
def manual_learning_enabled(user):
    """Tell whether manual learning is available to ``user``.

    Learning must be globally enabled; super administrators then always
    qualify, domain administrators need domain- or user-level learning,
    and any other account needs user-level learning.

    :return: True if learning is enabled, False otherwise.
    """
    conf = dict(param_tools.get_global_parameters("modoboa_amavis"))
    if not conf["manual_learning"]:
        return False
    if user.role == "SuperAdmins":
        return True
    if user.has_perm("admin.view_domains"):
        return conf["domain_level_learning"] or conf["user_level_learning"]
    return conf["user_level_learning"]
def setup_manual_learning_for_domain(domain):
    """Setup manual learning if necessary.

    Points the domain's amavis policy at a dedicated SA username so that
    learning commands hit a per-domain bayes database.

    :return: True if learning has been setup, False otherwise
    """
    if Policy.objects.filter(sa_username=domain.name).exists():
        return False
    # NOTE(review): the lookup name is "@" + name[:32] (up to 33 chars)
    # while create_user_and_policy truncates the whole name to 32 —
    # confirm both sides agree for very long domain names.
    policy = Policy.objects.get(policy_name="@{}".format(domain.name[:32]))
    policy.sa_username = domain.name
    policy.save()
    return True
def setup_manual_learning_for_mbox(mbox):
    """Setup manual learning if necessary.

    Alias recipients pointing at a mailbox are unwrapped first; a policy
    is then created for the mailbox (and shared by its aliases) unless one
    already exists.

    :return: True if learning has been setup, False otherwise
    """
    if (isinstance(mbox, admin_models.AliasRecipient) and
            mbox.r_mailbox is not None):
        mbox = mbox.r_mailbox
    if not isinstance(mbox, admin_models.Mailbox):
        return False
    pname = mbox.full_address[:32]
    if Policy.objects.filter(policy_name=pname).exists():
        return False
    policy = create_user_and_policy(pname)
    policy.sa_username = mbox.full_address
    policy.save()
    for alias in mbox.alias_addresses:
        create_user_and_use_policy(alias, policy)
    return True
def make_query_args(address, exact_extension=True, wildcard=None,
                    domain_search=False):
    """Build the ordered list of amavis recipient lookup keys for ``address``.

    Candidates go from most to least specific: the raw address (only when
    case-sensitivity or IDNA encoding changed it), local part with its
    delimiter extension, optionally the wildcard-extension form, the bare
    local-part@domain and — with ``domain_search`` — catch-all entries for
    the domain and for any domain ("@.").
    """
    assert isinstance(address, str), "address should be of type str"
    conf = dict(param_tools.get_global_parameters("modoboa_amavis"))
    local_part, domain = split_address(address)
    if not conf["localpart_is_case_sensitive"]:
        local_part = local_part.lower()
    if domain:
        # Normalise the domain and IDNA-encode it (amavis stores ASCII).
        domain = domain.lstrip("@").rstrip(".")
        domain = domain.lower()
        orig_domain = domain
        domain = idna.encode(domain, uts46=True).decode("ascii")
    delimiter = conf["recipient_delimiter"]
    local_part, extension = split_local_part(local_part, delimiter=delimiter)
    query_args = []
    # NOTE(review): when ``split_address`` yields no domain, the "%s@%s"
    # interpolations below render the literal text "None" — confirm callers
    # always pass fully qualified addresses.
    if (
        conf["localpart_is_case_sensitive"] or
        (domain and domain != orig_domain)
    ):
        # Keep the address exactly as typed when normalisation changed it.
        query_args.append(address)
    if extension:
        query_args.append("%s%s%s@%s" % (
            local_part, delimiter, extension, domain))
    if delimiter and not exact_extension and wildcard:
        query_args.append("%s%s%s@%s" % (
            local_part, delimiter, wildcard, domain))
    query_args.append("%s@%s" % (local_part, domain))
    if domain_search:
        query_args.append("@%s" % domain)
        query_args.append("@.")
    return query_args
def cleanup_email_address(address):
    """Normalise an address to ``Name <addr>`` form, or bare ``addr``.

    Parses with :func:`email.utils.parseaddr`; when no display name is
    present only the plain address is returned.
    """
    realname, email_address = parseaddr(address)
    if not realname:
        return email_address
    return "%s <%s>" % (realname, email_address)
| {
"content_hash": "1513ac8057bbab6883bfd0137a792416",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 77,
"avg_line_length": 34.24713958810069,
"alnum_prop": 0.5827208338901511,
"repo_name": "modoboa/modoboa-amavis",
"id": "e54ce0b9b9085d595ad9e79b215554a4eeb0591b",
"size": "14991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modoboa_amavis/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1504"
},
{
"name": "HTML",
"bytes": "10271"
},
{
"name": "JavaScript",
"bytes": "16144"
},
{
"name": "Python",
"bytes": "147572"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import project.fields.datetime_aware_jsonfield
class Migration(migrations.Migration):
    # Renames Response.results to exp_data and adds the completed,
    # conditions, global_event_timings and sequence fields.
    # Migrations are historical records — do not edit after deployment.
    dependencies = [("studies", "0003_auto_20170615_1404")]
    operations = [
        migrations.RenameField(
            model_name="response", old_name="results", new_name="exp_data"
        ),
        migrations.AddField(
            model_name="response",
            name="completed",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="response",
            name="conditions",
            field=project.fields.datetime_aware_jsonfield.DateTimeAwareJSONField(
                default=dict
            ),
        ),
        migrations.AddField(
            model_name="response",
            name="global_event_timings",
            field=project.fields.datetime_aware_jsonfield.DateTimeAwareJSONField(
                default=dict
            ),
        ),
        migrations.AddField(
            model_name="response",
            name="sequence",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=128),
                blank=True,
                default=list,
                size=None,
            ),
        ),
    ]
| {
"content_hash": "ad0b54729682585ee174b805ac11126b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 29.847826086956523,
"alnum_prop": 0.5644573925710124,
"repo_name": "CenterForOpenScience/lookit-api",
"id": "60d1ee886777f96c0aff449821badcd50c69b1af",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "studies/migrations/0004_auto_20170616_0244.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11022"
},
{
"name": "HTML",
"bytes": "185393"
},
{
"name": "Python",
"bytes": "481700"
},
{
"name": "Shell",
"bytes": "1166"
}
],
"symlink_target": ""
} |
import Tkinter as tk
import tkFont
import re
from uuid import uuid4
from collections import Counter
from generic import Point, Size, color_variant, distance
from area import Area
import things
AREA_HEIGHT = 630  # board size in pixels (the playing area is square)
THING_COUNT = 5  # base tile count per row/column; grows with player level
SHADOW_COLOR = (0, 0, 0)  # RGB black (not referenced in this module's code)
INVENTORY = Size(60, AREA_HEIGHT)  # inventory side panel dimensions
class NotWideEnoughException(Exception):
    """Raised when the requested game window is narrower than the board."""
class Game(tk.Frame):
    # Main widget: a Tk frame holding the play-field canvas, an inventory
    # side panel and a command entry. Board data lives in ``self.area``.
    def __init__(self, parent, width=AREA_HEIGHT, height=AREA_HEIGHT):
        # The board is square (AREA_HEIGHT px), so the window must be at
        # least that wide.
        if width < AREA_HEIGHT:
            raise NotWideEnoughException(
                'Width must be greater than {}'.format(AREA_HEIGHT)
            )
        tk.Frame.__init__(self, parent)
        tk.Frame.config(self, background='white')
        self.init_frames(height)
        self.player = things.Player()
        self.new()
        self.bind_events()
    @property
    def thing_size(self):
        # Pixel size of one tile; shrinks as the grid grows with the level.
        return AREA_HEIGHT / self.thing_count
    @property
    def thing_count(self):
        # Tiles per row/column; grows by 2 per player level.
        return THING_COUNT + self.player.level * 2
    def font(self, size=14, weight='bold'):
        # Helper building a named Courier font.
        return tkFont.Font(
            family='Courier',
            size=size,
            weight=weight,
            name='font{}'.format(size)
        )
    def init_frames(self, height):
        # Create the board canvas and the inventory panel, then pack them.
        self.canvas = tk.Canvas(
            self,
            highlightthickness=0,
            width=height,
            height=height,
            background='black'
        )
        self.inventory = tk.Canvas(
            self,
            highlightthickness=0,
            width=INVENTORY.w,
            height=INVENTORY.h,
            background='black'
        )
        self.show_command()
        self.inventory.pack(
            side='left',
            expand=True,
            padx=1,
            pady=2
        )
        self.canvas.pack(side='left', expand=True, padx=1, pady=2)
    def new(self, seed=None):
        # Start a fresh area. The seed encodes a random id plus the player
        # level, so a level can be replayed with the "use <seed>" command.
        self.seed = '{}-{}'.format (str(uuid4())[:8], self.player.level)
        if seed is not None:
            self.seed = seed
        self.player.refresh()
        self.canvas.delete('all')
        self.area = Area(self.thing_count, self.thing_count, self.seed)
        self.draw_area()
        self.draw_inventory()
        self.update_command()
    def draw_area(self):
        # Paint every tile; the player is drawn on top of the start flag.
        for y,row in enumerate(self.area.grid):
            for x,col in enumerate(row):
                thing = self.area.grid[y][x]
                self.draw_thing(thing, x, y)
                if isinstance(thing, things.FlagStart):
                    self.draw_thing(self.player, x, y)
        self.canvas.tag_raise(self.player.element)
        self.update_shadow()
    def draw_thing(self, thing, x, y):
        """Create the canvas rectangle for ``thing`` at grid cell (x, y).

        The canvas item id is stored on ``thing.element`` for later moves
        and recolouring.
        """
        # NOTE(review): the previous docstring mentioned raising
        # AttributeError / assigning self.player; the code does neither —
        # confirm intent.
        # http://effbot.org/tkinterbook/canvas.htm#canvas.Canvas.create_image-method
        # to use an image instead
        thing.element = self.canvas.create_rectangle(
            x * self.thing_size,
            y * self.thing_size,
            x * self.thing_size + self.thing_size,
            y * self.thing_size + self.thing_size,
            width=0,
            fill='#{0:02x}{1:02x}{2:02x}'.format(*thing.COLOR)
        )
    def bind_events(self):
        # Arrow keys move the player; Return executes the command entry.
        self.canvas.bind_all('<KeyPress-Up>', self.move_player)
        self.canvas.bind_all('<KeyPress-Down>', self.move_player)
        self.canvas.bind_all('<KeyPress-Left>', self.move_player)
        self.canvas.bind_all('<KeyPress-Right>', self.move_player)
        self.command_entry.bind('<Return>', self.do_command)
    def move_player(self, event):
        # Translate the arrow key into a one-tile delta, move if the
        # destination is not a Block, then update inventory/shadow and
        # check for the exit flag.
        x = y = 0
        if event.keysym == 'Up':
            y = -1
        if event.keysym == 'Down':
            y = 1
        if event.keysym == 'Left':
            x = -1
        if event.keysym == 'Right':
            x = 1
        if self.can_goto(x, y):
            self.canvas.move(
                self.player.element,
                x * self.thing_size,
                y * self.thing_size
            )
            self.player.move()
            self.collect_item()
            self.draw_inventory()
            self.update_shadow()
            if self.has_won():
                self.player.level_up()
                # self.draw_won_screen()
                self.new()
    def can_goto(self, x, y):
        # True when the target cell is not a Block.
        # NOTE(review): no bounds check — negative indices wrap to the far
        # side of the grid; presumably the maze border is made of Blocks.
        coords = self.get_thing_coords(self.player)
        return not isinstance(
            self.area.grid[coords[0] + y][coords[1] + x],
            things.Block
        )
    def get_thing_coords(self, thing):
        # Convert a canvas item position back to (row, col) grid indices.
        coords = map(round, self.canvas.coords(thing.element))
        return (
            int(coords[1] / self.thing_size),
            int(coords[0] / self.thing_size)
        )
    def collect_item(self):
        # Pick up the item under the player, if the player accepts it.
        coords = self.get_thing_coords(self.player)
        item = self.area.grid[coords[0]][coords[1]]
        if self.player.collect(item):
            self.remove_thing(item)
    def remove_thing(self, thing):
        # Replace a collected thing by a plain Path tile (grid + canvas).
        coords = self.get_thing_coords(thing)
        new_path = things.Path()
        self.area.grid[coords[0]][coords[1]] = new_path
        self.canvas.delete(thing.element)
        self.draw_thing(new_path, coords[1], coords[0])
        self.canvas.tag_raise(self.player.element)
    def update_shadow(self):
        # Recolour every tile according to its distance from the player's
        # light source (radius 0 when the player carries no light).
        try:
            radius = self.player.light.radius
        except AttributeError:
            radius = 0
        player_coords = self.get_thing_coords(self.player)
        # NOTE(review): indexes grid[x][y] while draw_area uses grid[y][x];
        # this only works because the grid is square — confirm orientation.
        # Also, distance(...) == 0 on the player's own tile would divide by
        # zero — confirm ``distance`` never returns 0 here.
        for y,row in enumerate(self.area.grid):
            for x,v in enumerate(row):
                thing = self.area.grid[x][y]
                self.canvas.itemconfig(
                    thing.element,
                    fill='#{0:02x}{1:02x}{2:02x}'.format(
                        *color_variant(
                            thing.COLOR,
                            radius / distance(player_coords, (x,y)))
                    )
                )
    def has_won(self):
        # The level is complete when the player stands on the exit flag.
        coords = self.get_thing_coords(self.player)
        return isinstance(
            self.area.grid[coords[0]][coords[1]],
            things.FlagExit
        )
    def draw_inventory_thing(self, thing_class, count, slot):
        # Render one inventory slot: coloured square, class name, count.
        self.inventory.create_rectangle(
            0,
            slot * INVENTORY.w,
            INVENTORY.w,
            slot * INVENTORY.w + INVENTORY.w,
            width=0,
            fill='#{0:02x}{1:02x}{2:02x}'.format(*thing_class.COLOR)
        )
        self.inventory.create_text(
            INVENTORY.w / 2,
            slot * INVENTORY.w + 9,
            text=thing_class.__name__,
            font=self.font(size=7, weight='normal')
        )
        self.inventory.create_text(
            INVENTORY.w / 2,
            slot * INVENTORY.w + INVENTORY.w / 2,
            text=count,
            font=self.font(size=14, weight='bold')
        )
    def draw_inventory(self):
        # Redraw the side panel, one slot per unused item class.
        self.inventory.delete('all')
        thing_classes = [t.__class__ for t in self.player.inventory.unused]
        for slot,thing in enumerate(Counter(thing_classes).items()):
            self.draw_inventory_thing(*thing, slot=slot)
    def show_command(self):
        # Build the command entry shown above the board.
        self.command_entry = tk.Entry(
            self,
            borderwidth=2,
            relief='flat',
            background='black',
            foreground='white',
            font=self.font(size=11, weight='normal'),
            insertbackground='white'
        )
        self.command_entry.pack(side='top', fill='x', padx=0, pady=0)
    def update_command(self):
        # Show the current seed as a ready-to-copy "use" command.
        self.command_entry.delete(0, 'end')
        self.command_entry.insert(0, '> use {}'.format(self.seed))
    def do_command(self, event):
        # Parse and execute the command entry: "use <seed>" replays a
        # level (the trailing "-<n>" restores the level), "reload"/"new"
        # restart, plus a couple of cheat codes.
        cmd = self.command_entry.get()
        cmd = cmd.replace('>', '').strip()
        if cmd.startswith('use '):
            seed = cmd.replace('use', '').strip()
            level = re.search(r'(-[0-9]+)$', seed)
            if level is not None:
                self.player.level = int(level.group()[1:])
            self.new(seed)
        elif cmd == 'reload' or cmd == 'new':
            self.new()
        elif cmd == 'sun':
            self.player.inventory.append(things.Sun(True))
            self.draw_inventory()
        elif cmd == 'black is black':
            self.player.COLOR = (0, 0, 0)
    # idea: a bell ring, when approaching it rings louder, to indicate direction
if __name__ == '__main__':
    # Build a fixed-size root window and mount the game in it.
    root = tk.Tk()
    root.resizable(0, 0)
    game = Game(root)
    game.pack(side='left', padx=0, pady=0)
root.mainloop() | {
"content_hash": "e065d6de2e7833cb3b78ccec1368c176",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 78,
"avg_line_length": 24.14487632508834,
"alnum_prop": 0.6572515732474755,
"repo_name": "evuez/disclosure",
"id": "763b08d28e01a4e8fb2ea66ddf5ce455016f059e",
"size": "6858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15155"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
import os.path
import types
from app.libs.utils import load_module_attrs
def _filter(module):
    """Return ``module.urls`` when it is defined as a list, else ``None``.

    Used as the extractor passed to :func:`load_module_attrs` to collect
    URL patterns from the service submodules.
    """
    # ``isinstance(..., list)`` works on Python 2 and 3; the original
    # ``types.ListType`` only exists on Python 2.
    urls = getattr(module, 'urls', None)
    if isinstance(urls, list):
        return urls
# Aggregate the ``urls`` lists exposed by every submodule of this package.
path = os.path.abspath(os.path.dirname(__file__))
urls = load_module_attrs(path, _filter, True)
__all__ = ['urls']
| {
"content_hash": "295612b64eac7f9e68e88fae76187521",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 23.294117647058822,
"alnum_prop": 0.702020202020202,
"repo_name": "Damnever/2L",
"id": "8bb5abee37cb48883249bb0ab8d1b68fb0c2c528",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/services/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
[2018-01-19] Challenge #347 [Hard] Hue Drops Puzzle
https://www.reddit.com/r/dailyprogrammer/comments/7riu6p/20180119_challenge_347_hard_hue_drops_puzzle/
# Description
I found the game [Hue Drops](https://apkpure.com/hue-drops/aero.woi.huedrops) on a recent flight, turns out it's also a
mobile game. [One reviewer](https://www.kotaku.com.au/2017/06/every-game-in-my-delta-airplane-seat-reviewed/) described
it:
_You start with one dot, and you can change the colours of the adjacent dots. It's like playing with the paint bucket
tool in MS Paint! You slowly change the colour of the entire board one section at a time._
The puzzle opens with a group of tiles of six random colors. The tile in the upper left remains wild for you to change.
Tile colors change by flooding from the start tile to directly connected tiles in the four cardinal directions (not
diagonals). Directly connected tiles convert to the new color, allowing you to extend the size of the block. The puzzle
challenges you to sequentially change the color of the root tile until you grow the block of tiles to the target color
in 25 moves or fewer.
Today's challenge is to read a board tiled with six random colors (R O Y G B V), starting from the wild (W) tile in the
upper left corner and to produce a sequence of color changes
# Input Description
You'll be given a row of two integers telling you how many columns and rows to read. Then you'll be presented the board
(with those dimensions) as ASCII art, each tile color indicated by a single letter (including the wild tile as a W).
Then you'll be given the target color as a single uppercase letter. Example:
4 4
W O O O
B G V R
R G B G
V O B R
O
# Output Description
Your program should emit the sequence of colors to change the puzzle to achieve the target color. Remember, you have
only 25 moves maximum in which to solve the puzzle. Note that puzzles may have more than one solution. Example:
O G O B R V G R O
# Challenge Input
10 12
W Y O B V G V O Y B
G O O V R V R G O R
V B R R R B R B G Y
B O Y R R G Y V O V
V O B O R G B R G R
B O G Y Y G O V R V
O O G O Y R O V G G
B O O V G Y V B Y G
R B G V O R Y G G G
Y R Y B R O V O B V
O B O B Y O Y V B O
V R R G V V G V V G
V
"""
def main():
    """Entry point for the Hue Drops challenge (solution not implemented)."""
    pass


if __name__ == "__main__":
    main()
| {
"content_hash": "4de05639c41a1550a02cee1a8dbb2e1b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 119,
"avg_line_length": 42.25,
"alnum_prop": 0.7066779374471682,
"repo_name": "DayGitH/Python-Challenges",
"id": "f3a92d135a14edfe3899334665368b2b255d1c9a",
"size": "2366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20180119C.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
import logging
import telegram
import urllib
def main():
    """Poll Telegram for new messages and echo them back through ``ed``."""
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # SECURITY NOTE: hard-coded bot token committed to source control — it
    # should be revoked and loaded from the environment instead.
    bot = telegram.Bot('136940855:AAGaZVK07Mocwz4NVabKysOuWEvJwGvEpT0') # Telegram Bot Authorization Token
    # NOTE(review): raises IndexError when there are no pending updates at
    # startup — confirm the bot always has at least one.
    LAST_UPDATE_ID = bot.getUpdates()[-1].update_id # Get latest update
    while True:
        # Long-poll for updates newer than the last one we processed.
        for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):
            text = update.message.text
            chat_id = update.message.chat.id
            update_id = update.update_id
            if text:
                roboed = ed(text) # Ask something to Robô Ed
                bot.sendMessage(chat_id=chat_id, text=roboed)
                LAST_UPDATE_ID = update_id + 1
def ed(text):
    """Placeholder reply function: echoes the incoming text unchanged."""
    return text
if __name__ == '__main__':
main() | {
"content_hash": "dc7048d7d937c56a6103087a6a4798c0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 107,
"avg_line_length": 30.035714285714285,
"alnum_prop": 0.6076099881093936,
"repo_name": "asivokon/ailas",
"id": "b31f679c0a7fc4048fdde15043a7c6e3bb87893e",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telegram_bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2665"
},
{
"name": "HTML",
"bytes": "5465"
},
{
"name": "JavaScript",
"bytes": "3307"
},
{
"name": "Makefile",
"bytes": "69"
},
{
"name": "Python",
"bytes": "28196"
}
],
"symlink_target": ""
} |
from six import with_metaclass
from django import template
from oscar.core.loading import get_model
register = template.Library()
# Category model resolved through Oscar's dynamic class loading.
Category = get_model("catalogue", "category")
class PassThrough(object):
    """Descriptor forwarding attribute reads to ``instance.category``.

    Accessing the attribute on the class returns the descriptor itself;
    accessing it on an instance returns the same-named attribute of the
    instance's ``category``.
    """

    def __init__(self, name):
        # Name of the proxied category attribute.
        self.name = name

    def __get__(self, obj, objtype):
        return self if obj is None else getattr(obj.category, self.name)
class CategoryFieldPassThroughMetaClass(type):
    """
    Add accessors for category fields to whichever class is of this type.

    Every field of the ``Category`` model gets a :class:`PassThrough`
    descriptor on the created class; attributes defined in the class body
    take precedence over the generated accessors.
    """

    def __new__(cls, name, bases, attrs):
        field_accessors = {}
        for field in Category._meta.get_fields():
            # BUGFIX: the original rebound the ``name`` argument in this
            # loop, which renamed every created class after the last model
            # field instead of its declared name.
            field_name = field.name
            field_accessors[field_name] = PassThrough(field_name)
        # attrs win of silly field accessors
        field_accessors.update(attrs)
        return type.__new__(cls, name, bases, field_accessors)
class CheapCategoryInfo(with_metaclass(CategoryFieldPassThroughMetaClass, dict)):
    """
    Wrapper class for Category.

    Besides allowing inclusion of extra info, useful while rendering a template,
    this class hides any expensive properties people should not use by accident
    in templates.

    This replaces both the node as the info object returned by the ``category_tree``
    templatetag, so it mimics a tuple of 2 items (which are the same) for
    backwards compatibility.
    """
    def __init__(self, category, **info):
        # NOTE(review): zero-argument super() is Python-3-only while the
        # module still imports ``six`` — confirm Python 2 is unsupported.
        super().__init__(info)
        self.category = category
    @property
    def pk(self):
        # Expose the wrapped category's primary key directly.
        return self.category.pk
    def get_absolute_url(self):
        # URL precomputed by the template tag and stored in the dict part.
        return self["url"]
    def __len__(self):
        "Mimic a tuple of 2 items"
        return 2
    def __iter__(self):
        "be an iterable of 2 times the same item"
        yield self
        yield self
@register.simple_tag(name="category_tree")  # noqa: C901 too complex
def get_annotated_list(depth=None, parent=None):
    """
    Gets an annotated list from a tree branch.

    Borrows heavily from treebeard's get_annotated_list.

    :param depth: maximum tree depth to include (relative to ``parent``).
    :param parent: optional category whose descendants are listed; when
        omitted the whole tree is rendered.
    :return: list of :class:`CheapCategoryInfo` entries carrying the
        rendering hints (``url``, ``level``, ``num_to_close``,
        ``has_children``) used by the template.
    """
    # 'depth' is the backwards-compatible name for the template tag,
    # 'max_depth' is the better variable name.
    max_depth = depth
    annotated_categories = []
    tree_slug = ""
    start_depth, prev_depth = (None, None)
    if parent:
        categories = parent.get_descendants()
        tree_slug = parent.get_full_slug()
        if max_depth is not None:
            max_depth += parent.get_depth()
    else:
        categories = Category.get_tree()
        if max_depth is not None:
            categories = categories.filter(depth__lte=max_depth)
    # BUGFIX: must run for both branches; it was previously only executed
    # in the ``else`` branch, so rendering a subtree (parent given) raised
    # NameError at the first node below.
    info = CheapCategoryInfo(parent, url="")
    for node in categories:
        node_depth = node.get_depth()
        if start_depth is None:
            start_depth = node_depth
        # Update previous node's info
        if prev_depth is None or node_depth > prev_depth:
            info["has_children"] = True
            if info.category is not None:
                tree_slug = info.category.get_full_slug(tree_slug)
        if prev_depth is not None and node_depth < prev_depth:
            # Moving back up the tree: close the finished levels and trim
            # the accumulated slug accordingly.
            depth_difference = prev_depth - node_depth
            info["num_to_close"] = list(range(0, depth_difference))
            tree_slugs = tree_slug.rsplit(node._slug_separator, depth_difference)
            if tree_slugs:
                tree_slug = tree_slugs[0]
            else:
                tree_slug = node.slug
        info = CheapCategoryInfo(
            node,
            url=node._get_absolute_url(tree_slug),
            num_to_close=[],
            level=node_depth - start_depth,
        )
        annotated_categories.append(info)
        prev_depth = node_depth
    if prev_depth is not None:
        # close last leaf
        info['num_to_close'] = list(range(0, prev_depth - start_depth))
        # BUGFIX: was ``prev_depth > prev_depth`` (always False); compare
        # against the last visited node's depth as upstream oscar does.
        info['has_children'] = prev_depth > node_depth
    return annotated_categories
| {
"content_hash": "7bae28e83cfa1e4c085ed05786a21c0a",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 84,
"avg_line_length": 29.398496240601503,
"alnum_prop": 0.6153452685421995,
"repo_name": "sasha0/django-oscar",
"id": "69acaf23e6b0ae9bbdc0a0d1262da608ae2b7922",
"size": "3910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/oscar/templatetags/category_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387941"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "518624"
},
{
"name": "JavaScript",
"bytes": "344864"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "1957797"
},
{
"name": "Shell",
"bytes": "1643"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
import six
class Requirement(object):
    """A package reference plus the attributes describing how to depend on it.

    ``range_reference`` keeps the reference exactly as declared (possibly a
    version range); ``conan_reference`` is replaced by the resolver with the
    concrete reference once a range has been pinned.
    """

    def __init__(self, conan_reference, private=False, override=False, dev=False):
        """
        :param conan_reference: reference (possibly a version range) to depend on
        :param private: True when the dependency is somewhat embedded (like a
            static lib linked into a shared lib) and consumers need not link it
        :param override: True when this entry only overrides upstream values
            instead of declaring an actual requirement
        :param dev: True when the dependency is only needed at dev time
            (building/testing) and must not affect the package hash
        """
        self.conan_reference = conan_reference
        self.range_reference = conan_reference
        self.private = private
        self.override = override
        self.dev = dev

    @property
    def version_range(self):
        """Version range expression without the surrounding brackets, or None."""
        version = self.range_reference.version
        if not (version.startswith("[") and version.endswith("]")):
            return None
        return version[1:-1]

    @property
    def is_resolved(self):
        """True once a version range has been pinned to a concrete reference."""
        return self.conan_reference != self.range_reference

    def __repr__(self):
        suffix = " P" if self.private else ""
        return str(self.conan_reference) + suffix

    def __eq__(self, other):
        mine = (self.override, self.conan_reference, self.private, self.dev)
        theirs = (other.override, other.conan_reference, other.private, other.dev)
        return mine == theirs

    def __ne__(self, other):
        return not self.__eq__(other)
class Requirements(OrderedDict):
    """ {name: Requirement} in order, e.g. {"Hello": Requirement for Hello}
    """
    def __init__(self, *args):
        super(Requirements, self).__init__()
        # Dev requirements are ignored unless explicitly allowed.
        self.allow_dev = False
        for v in args:
            if isinstance(v, tuple):
                # Tuple form: (reference, "override"?, "private"?).
                # Note: "dev" is not accepted here; use add_dev() instead.
                override = private = dev = False
                ref = v[0]
                for elem in v[1:]:
                    if elem == "override":
                        override = True
                    elif elem == "private":
                        private = True
                    else:
                        raise ConanException("Unknown requirement config %s" % elem)
                self.add(ref, private=private, override=override, dev=dev)
            else:
                self.add(v)
    def add_dev(self, *args):
        # Same parsing as __init__, but every entry is marked dev=True
        # (so it is dropped unless allow_dev is set).
        for v in args:
            if isinstance(v, tuple):
                override = private = False
                ref = v[0]
                for elem in v[1:]:
                    if elem == "override":
                        override = True
                    elif elem == "private":
                        private = True
                    else:
                        raise ConanException("Unknown requirement config %s" % elem)
                self.add(ref, private=private, override=override, dev=True)
            else:
                self.add(v, dev=True)
    def copy(self):
        """ We need a custom copy as the normal one requires __init__ to be
        properly defined. This is not a deep-copy, in fact, requirements in the dict
        are changed by RequireResolver, and are propagated upstream
        """
        result = Requirements()
        for name, req in self.items():
            result[name] = req
        return result
    def iteritems(self):  # FIXME: Just a trick to not change default testing conanfile for py3
        return self.items()
    def add(self, reference, private=False, override=False, dev=False):
        """ to define requirements by the user in text, prior to any propagation

        :param str reference: textual reference, parsed with
            ConanFileReference.loads
        :raises ConanException: when the same name is already registered
            with different attributes
        """
        assert isinstance(reference, six.string_types)
        if dev and not self.allow_dev:
            # Dev requirements are silently skipped unless enabled.
            return
        conan_reference = ConanFileReference.loads(reference)
        name = conan_reference.name
        new_requirement = Requirement(conan_reference, private, override, dev)
        old_requirement = self.get(name)
        if old_requirement and old_requirement != new_requirement:
            raise ConanException("Duplicated requirement %s != %s"
                                 % (old_requirement, new_requirement))
        else:
            self[name] = new_requirement
    def update(self, down_reqs, output, own_ref, down_ref):
        """ Compute actual requirement values when downstream values are defined

        param down_reqs: the current requirements as coming from downstream to override
                         current requirements
        param own_ref: ConanFileReference of the current conanfile
        param down_ref: ConanFileReference of the downstream that is overriding values or None
        return: new Requirements() value to be passed upstream
        """
        assert isinstance(down_reqs, Requirements)
        assert isinstance(own_ref, ConanFileReference) if own_ref else True
        assert isinstance(down_ref, ConanFileReference) if down_ref else True
        new_reqs = down_reqs.copy()
        if own_ref:
            # Do not propagate a requirement on ourselves.
            new_reqs.pop(own_ref.name, None)
        for name, req in self.items():
            # Private/dev requirements are not visible downstream, so they
            # cannot be overridden and are not propagated.
            if req.private or req.dev:
                continue
            if name in down_reqs:
                other_req = down_reqs[name]
                # update dependency
                other_ref = other_req.conan_reference
                if other_ref and other_ref != req.conan_reference:
                    output.info("%s requirement %s overriden by %s to %s "
                                % (own_ref, req.conan_reference, down_ref or "your conanfile",
                                   other_ref))
                    req.conan_reference = other_ref
            new_reqs[name] = req
        return new_reqs
    def __call__(self, conan_reference, private=False, override=False, dev=False):
        # Allow ``self.requires("pkg/1.0@user/channel")`` call syntax.
        self.add(conan_reference, private, override, dev)
    def __repr__(self):
        result = []
        for req in self.values():
            result.append(str(req))
        return '\n'.join(result)
| {
"content_hash": "721307a305fa3eda307cc4d5260f1f68",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 95,
"avg_line_length": 39.79754601226994,
"alnum_prop": 0.5729921381223987,
"repo_name": "mropert/conan",
"id": "5c7a221262e4ddbfdd1ee7265d7928becc4ff24c",
"size": "6487",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/model/requires.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "653"
},
{
"name": "Python",
"bytes": "1898890"
},
{
"name": "Shell",
"bytes": "1342"
}
],
"symlink_target": ""
} |
import logging; log = logging.getLogger(__name__)
# Try to load JSON libraries in this order:
# ujson -> simplejson -> json
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
import cStringIO as StringIO
import inspect
import math
import time
import urllib
import sys
# 3rd party libraries that might not be present during initial install
# but we need to import for the version #
try:
import requests
# Monkey patch to requests' json using ujson when available;
# Otherwise it wouldn't affect anything
requests.models.json = json
except ImportError:
pass
# Helpful for debugging what goes in and out
NETWORK_DEBUG = False
if NETWORK_DEBUG:
    # These two lines enable debugging at httplib level (requests->urllib3->httplib)
    # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
    # The only thing missing will be the response.body which is not logged.
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    # You must initialize logging, otherwise you'll not see debug output.
    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True

# Default API version. Move this forward as the library is maintained and kept current
API_VERSION_YEAR = '2014'
API_VERSION_MONTH = '04'
API_VERSION_DAY = '23'
# The v= parameter sent on every request, e.g. '20140423'.
API_VERSION = '{year}{month}{day}'.format(year=API_VERSION_YEAR, month=API_VERSION_MONTH, day=API_VERSION_DAY)

# Library versioning matches supported foursquare API version
__version__ = '{year}.{month}.{day}'.format(year=API_VERSION_YEAR, month=API_VERSION_MONTH, day=API_VERSION_DAY)
__author__ = u'Mike Lewis'

AUTH_ENDPOINT = 'https://foursquare.com/oauth2/authenticate'
TOKEN_ENDPOINT = 'https://foursquare.com/oauth2/access_token'
API_ENDPOINT = 'https://api.foursquare.com/v2'

# Number of times to retry http requests
NUM_REQUEST_RETRIES = 3

# Max number of sub-requests per multi request
MAX_MULTI_REQUESTS = 5

# Change this if your Python distribution has issues with Foursquare's SSL cert
VERIFY_SSL = True
# Generic foursquare exception
class FoursquareException(Exception):
    """Base class for every error raised by this module."""

# Specific exceptions, one per errorType string returned by the API.
class InvalidAuth(FoursquareException):
    """The supplied OAuth credentials were rejected."""

class ParamError(FoursquareException):
    """A request parameter was missing or malformed."""

class EndpointError(FoursquareException):
    """The requested endpoint does not exist or is unavailable."""

class NotAuthorized(FoursquareException):
    """The authenticated user may not perform this action."""

class RateLimitExceeded(FoursquareException):
    """The API rate limit has been hit."""

class Deprecated(FoursquareException):
    """The endpoint or parameter is deprecated by foursquare."""

class ServerError(FoursquareException):
    """foursquare reported an internal server error."""

class FailedGeocode(FoursquareException):
    """The supplied location could not be geocoded."""

class Other(FoursquareException):
    """Any error type not covered by a more specific class."""

# Maps the API's errorType strings to the exception class raised for each.
error_types = {
    'invalid_auth': InvalidAuth,
    'param_error': ParamError,
    'endpoint_error': EndpointError,
    'not_authorized': NotAuthorized,
    'rate_limit_exceeded': RateLimitExceeded,
    'deprecated': Deprecated,
    'server_error': ServerError,
    'failed_geocode': FailedGeocode,
    'other': Other,
}
class Foursquare(object):
"""foursquare V2 API wrapper"""
def __init__(self, client_id=None, client_secret=None, access_token=None, redirect_uri=None, version=None, lang=None):
    """Build the client: OAuth helper, HTTP requester, and endpoint groups.

    All arguments are optional; without an access_token the requester runs
    in userless mode using client_id/client_secret (see Requester.set_token).
    """
    self.oauth = self.OAuth(client_id, client_secret, redirect_uri)
    self.base_requester = self.Requester(client_id, client_secret, access_token, version, lang)
    # Expose every _Endpoint subclass as an attribute (self.users, self.venues, ...).
    self._attach_endpoints()
def _attach_endpoints(self):
    """Instantiate every _Endpoint subclass and bind it under its endpoint name."""
    for _, member in inspect.getmembers(self):
        if not inspect.isclass(member):
            continue
        if not issubclass(member, self._Endpoint) or member is self._Endpoint:
            continue
        instance = member(self.base_requester)
        setattr(self, instance.endpoint, instance)
def set_access_token(self, access_token):
    """Swap in a new OAuth token for all subsequent requests."""
    self.base_requester.set_token(access_token)
@property
def rate_limit(self):
    """Maximum rate limit reported by the last API call (X-RateLimit-Limit)."""
    return self.base_requester.rate_limit
@property
def rate_remaining(self):
    """Remaining rate limit reported by the last API call (X-RateLimit-Remaining)."""
    return self.base_requester.rate_remaining
class OAuth(object):
"""Handles OAuth authentication procedures and helps retrieve tokens"""
def __init__(self, client_id, client_secret, redirect_uri):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
def auth_url(self):
"""Gets the url a user needs to access to give up a user token"""
params = {
'client_id': self.client_id,
'response_type': u'code',
'redirect_uri': self.redirect_uri,
}
return '{AUTH_ENDPOINT}?{params}'.format(
AUTH_ENDPOINT=AUTH_ENDPOINT,
params=urllib.urlencode(params))
def get_token(self, code):
"""Gets the auth token from a user's response"""
if not code:
log.error(u'Code not provided')
return None
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': u'authorization_code',
'redirect_uri': self.redirect_uri,
'code': unicode(code),
}
# Get the response from the token uri and attempt to parse
return _get(TOKEN_ENDPOINT, params=params)['data']['access_token']
class Requester(object):
"""Api requesting object"""
def __init__(self, client_id=None, client_secret=None, access_token=None, version=None, lang=None):
"""Sets up the api object"""
self.client_id = client_id
self.client_secret = client_secret
self.set_token(access_token)
self.version = version if version else API_VERSION
self.lang = lang
self.multi_requests = list()
self.rate_limit = None
self.rate_remaining = None
def set_token(self, access_token):
"""Set the OAuth token for this requester"""
self.oauth_token = access_token
self.userless = not bool(access_token) # Userless if no access_token
def GET(self, path, params={}, **kwargs):
"""GET request that returns processed data"""
params = params.copy()
# Short-circuit multi requests
if kwargs.get('multi') is True:
return self.add_multi_request(path, params)
# Continue processing normal requests
headers = self._create_headers()
params = self._enrich_params(params)
url = '{API_ENDPOINT}{path}'.format(
API_ENDPOINT=API_ENDPOINT,
path=path
)
result = _get(url, headers=headers, params=params)
self.rate_limit = result['headers']['X-RateLimit-Limit']
self.rate_remaining = result['headers']['X-RateLimit-Remaining']
return result['data']['response']
def add_multi_request(self, path, params={}):
"""Add multi request to list and return the number of requests added"""
url = path
if params:
# First convert the params into a query string then quote the whole string
# so it will fit into the multi request query -as a value for the requests= query param-
url += '?{0}'.format(urllib.quote_plus(urllib.urlencode(params)))
self.multi_requests.append(url)
return len(self.multi_requests)
def POST(self, path, data={}, files=None):
"""POST request that returns processed data"""
if data is not None:
data = data.copy()
if files is not None:
files = files.copy()
headers = self._create_headers()
data = self._enrich_params(data)
url = '{API_ENDPOINT}{path}'.format(
API_ENDPOINT=API_ENDPOINT,
path=path
)
result = _post(url, headers=headers, data=data, files=files)
self.rate_limit = result['headers']['X-RateLimit-Limit']
self.rate_remaining = result['headers']['X-RateLimit-Remaining']
return result['data']['response']
def _enrich_params(self, params):
"""Enrich the params dict"""
if self.version:
params['v'] = self.version
if self.userless:
params['client_id'] = self.client_id
params['client_secret'] = self.client_secret
else:
params['oauth_token'] = self.oauth_token
return params
def _create_headers(self):
"""Get the headers we need"""
headers = {}
# If we specified a specific language, use that
if self.lang:
headers['Accept-Language'] = self.lang
return headers
class _Endpoint(object):
"""Generic endpoint class"""
def __init__(self, requester):
"""Stores the request function for retrieving data"""
self.requester = requester
def _expanded_path(self, path=None):
"""Gets the expanded path, given this endpoint"""
return '/{expanded_path}'.format(
expanded_path='/'.join(p for p in (self.endpoint, path) if p)
)
def GET(self, path=None, *args, **kwargs):
"""Use the requester to get the data"""
return self.requester.GET(self._expanded_path(path), *args, **kwargs)
def POST(self, path=None, *args, **kwargs):
"""Use the requester to post the data"""
return self.requester.POST(self._expanded_path(path), *args, **kwargs)
class Users(_Endpoint):
"""User specific endpoint"""
endpoint = 'users'
def __call__(self, USER_ID=u'self', multi=False):
"""https://developer.foursquare.com/docs/users/users"""
return self.GET('{USER_ID}'.format(USER_ID=USER_ID), multi=multi)
"""
General
"""
def leaderboard(self, params={}, multi=False):
"""https://developer.foursquare.com/docs/users/leaderboard"""
return self.GET('leaderboard', params, multi=multi)
def requests(self, multi=False):
"""https://developer.foursquare.com/docs/users/requests"""
return self.GET('requests', multi=multi)
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/users/search"""
return self.GET('search', params, multi=multi)
"""
Aspects
"""
def badges(self, USER_ID=u'self', multi=False):
"""https://developer.foursquare.com/docs/users/badges"""
return self.GET('{USER_ID}/badges'.format(USER_ID=USER_ID), multi=multi)
def checkins(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/checkins"""
return self.GET('{USER_ID}/checkins'.format(USER_ID=USER_ID), params, multi=multi)
def all_checkins(self, USER_ID=u'self'):
"""Utility function: Get every checkin this user has ever made"""
offset = 0
while(True):
checkins = self.checkins(USER_ID=USER_ID, params={'limit': 250, 'offset': offset})
# Yield out each checkin
for checkin in checkins['checkins']['items']:
yield checkin
# Determine if we should stop here or query again
offset += len(checkins['checkins']['items'])
if (offset >= checkins['checkins']['count']) or (len(checkins['checkins']['items']) == 0):
# Break once we've processed everything
break
def friends(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/friends"""
return self.GET('{USER_ID}/friends'.format(USER_ID=USER_ID), params, multi=multi)
def lists(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/lists"""
return self.GET('{USER_ID}/lists'.format(USER_ID=USER_ID), params, multi=multi)
def mayorships(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/mayorships"""
return self.GET('{USER_ID}/mayorships'.format(USER_ID=USER_ID), params, multi=multi)
def photos(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/photos"""
return self.GET('{USER_ID}/photos'.format(USER_ID=USER_ID), params, multi=multi)
def venuehistory(self, USER_ID=u'self', params={}, multi=False):
"""https://developer.foursquare.com/docs/users/venuehistory"""
return self.GET('{USER_ID}/venuehistory'.format(USER_ID=USER_ID), params, multi=multi)
"""
Actions
"""
def approve(self, USER_ID):
"""https://developer.foursquare.com/docs/users/approve"""
return self.POST('{USER_ID}/approve'.format(USER_ID=USER_ID))
def deny(self, USER_ID):
"""https://developer.foursquare.com/docs/users/deny"""
return self.POST('{USER_ID}/deny'.format(USER_ID=USER_ID))
def request(self, USER_ID):
"""https://developer.foursquare.com/docs/users/request"""
return self.POST('{USER_ID}/request'.format(USER_ID=USER_ID))
def setpings(self, USER_ID, params):
"""https://developer.foursquare.com/docs/users/setpings"""
return self.POST('{USER_ID}/setpings'.format(USER_ID=USER_ID), params)
def unfriend(self, USER_ID):
"""https://developer.foursquare.com/docs/users/unfriend"""
return self.POST('{USER_ID}/unfriend'.format(USER_ID=USER_ID))
def update(self, params={}, photo_data=None, photo_content_type='image/jpeg'):
"""https://developer.foursquare.com/docs/users/update"""
if photo_data:
files = { 'photo': ('photo', photo_data, photo_content_type) }
else:
files = None
return self.POST('self/update', data=params, files=files)
class Venues(_Endpoint):
"""Venue specific endpoint"""
endpoint = 'venues'
"""
General
"""
def __call__(self, VENUE_ID, multi=False):
"""https://developer.foursquare.com/docs/venues/venues"""
return self.GET('{VENUE_ID}'.format(VENUE_ID=VENUE_ID), multi=multi)
def add(self, params):
"""https://developer.foursquare.com/docs/venues/add"""
return self.POST('add', params)
def categories(self, multi=False):
"""https://developer.foursquare.com/docs/venues/categories"""
return self.GET('categories', multi=multi)
def explore(self, params, multi=False):
"""https://developer.foursquare.com/docs/venues/explore"""
return self.GET('explore', params, multi=multi)
def managed(self, multi=False):
"""https://developer.foursquare.com/docs/venues/managed"""
return self.GET('managed', multi=multi)
MAX_SEARCH_LIMIT = 50
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/venues/search"""
return self.GET('search', params, multi=multi)
def suggestcompletion(self, params, multi=False):
"""https://developer.foursquare.com/docs/venues/suggestcompletion"""
return self.GET('suggestcompletion', params, multi=multi)
def trending(self, params, multi=False):
"""https://developer.foursquare.com/docs/venues/trending"""
return self.GET('trending', params, multi=multi)
"""
Aspects
"""
def events(self, VENUE_ID, multi=False):
"""https://developer.foursquare.com/docs/venues/events"""
return self.GET('{VENUE_ID}/events'.format(VENUE_ID=VENUE_ID), multi=multi)
def herenow(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/herenow"""
return self.GET('{VENUE_ID}/herenow'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def links(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/links"""
return self.GET('{VENUE_ID}/links'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def listed(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/listed"""
return self.GET('{VENUE_ID}/listed'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def menu(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/menu"""
return self.GET('{VENUE_ID}/menu'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def photos(self, VENUE_ID, params, multi=False):
"""https://developer.foursquare.com/docs/venues/photos"""
return self.GET('{VENUE_ID}/photos'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def similar(self, VENUE_ID, multi=False):
"""https://developer.foursquare.com/docs/venues/similar"""
return self.GET('{VENUE_ID}/similar'.format(VENUE_ID=VENUE_ID), multi=multi)
def stats(self, VENUE_ID, multi=False):
"""https://developer.foursquare.com/docs/venues/stats"""
return self.GET('{VENUE_ID}/stats'.format(VENUE_ID=VENUE_ID), multi=multi)
def tips(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/tips"""
return self.GET('{VENUE_ID}/tips'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def nextvenues(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/nextvenues"""
return self.GET('{VENUE_ID}/nextvenues'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def likes(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/likes"""
return self.GET('{VENUE_ID}/likes'.format(VENUE_ID=VENUE_ID), params, multi=multi)
def hours(self, VENUE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/venues/hours"""
return self.GET('{VENUE_ID}/hours'.format(VENUE_ID=VENUE_ID), params, multi=multi)
"""
Actions
"""
def edit(self, VENUE_ID, params={}):
"""https://developer.foursquare.com/docs/venues/edit"""
return self.POST('{VENUE_ID}/edit'.format(VENUE_ID=VENUE_ID), params)
def flag(self, VENUE_ID, params):
"""https://developer.foursquare.com/docs/venues/flag"""
return self.POST('{VENUE_ID}/flag'.format(VENUE_ID=VENUE_ID), params)
def marktodo(self, VENUE_ID, params={}):
"""https://developer.foursquare.com/docs/venues/marktodo"""
return self.POST('{VENUE_ID}/marktodo'.format(VENUE_ID=VENUE_ID), params)
def proposeedit(self, VENUE_ID, params):
"""https://developer.foursquare.com/docs/venues/proposeedit"""
return self.POST('{VENUE_ID}/proposeedit'.format(VENUE_ID=VENUE_ID), params)
def setrole(self, VENUE_ID, params):
"""https://developer.foursquare.com/docs/venues/setrole"""
return self.POST('{VENUE_ID}/setrole'.format(VENUE_ID=VENUE_ID), params)
class Checkins(_Endpoint):
"""Checkin specific endpoint"""
endpoint = 'checkins'
def __call__(self, CHECKIN_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/checkins/checkins"""
return self.GET('{CHECKIN_ID}'.format(CHECKIN_ID=CHECKIN_ID), params, multi=multi)
def add(self, params):
"""https://developer.foursquare.com/docs/checkins/add"""
return self.POST('add', params)
def recent(self, params={}, multi=False):
"""https://developer.foursquare.com/docs/checkins/recent"""
return self.GET('recent', params, multi=multi)
"""
Actions
"""
def addcomment(self, CHECKIN_ID, params):
"""https://developer.foursquare.com/docs/checkins/addcomment"""
return self.POST('{CHECKIN_ID}/addcomment'.format(CHECKIN_ID=CHECKIN_ID), params)
def addpost(self, CHECKIN_ID, params):
"""https://developer.foursquare.com/docs/checkins/addpost"""
return self.POST('{CHECKIN_ID}/addpost'.format(CHECKIN_ID=CHECKIN_ID), params)
def deletecomment(self, CHECKIN_ID, params):
"""https://developer.foursquare.com/docs/checkins/deletecomment"""
return self.POST('{CHECKIN_ID}/deletecomment'.format(CHECKIN_ID=CHECKIN_ID), params)
def reply(self, CHECKIN_ID, params):
"""https://developer.foursquare.com/docs/checkins/reply"""
return self.POST('{CHECKIN_ID}/reply'.format(CHECKIN_ID=CHECKIN_ID), params)
class Tips(_Endpoint):
"""Tips specific endpoint"""
endpoint = 'tips'
def __call__(self, TIP_ID, multi=False):
"""https://developer.foursquare.com/docs/tips/tips"""
return self.GET('{TIP_ID}'.format(TIP_ID=TIP_ID), multi=multi)
def add(self, params):
"""https://developer.foursquare.com/docs/tips/add"""
return self.POST('add', params)
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/tips/add"""
return self.GET('search', params, multi=multi)
"""
Aspects
"""
def done(self, TIP_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/tips/done"""
return self.GET('{TIP_ID}/done'.format(TIP_ID=TIP_ID), params, multi=multi)
def listed(self, TIP_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/tips/listed"""
return self.GET('{TIP_ID}/listed'.format(TIP_ID=TIP_ID), params, multi=multi)
"""
Actions
"""
def markdone(self, TIP_ID):
"""https://developer.foursquare.com/docs/tips/markdone"""
return self.POST('{TIP_ID}/markdone'.format(TIP_ID=TIP_ID))
def marktodo(self, TIP_ID):
"""https://developer.foursquare.com/docs/tips/marktodo"""
return self.POST('{TIP_ID}/marktodo'.format(TIP_ID=TIP_ID))
def unmark(self, TIP_ID):
"""https://developer.foursquare.com/docs/tips/unmark"""
return self.POST('{TIP_ID}/unmark'.format(TIP_ID=TIP_ID))
class Lists(_Endpoint):
"""Lists specific endpoint"""
endpoint = 'lists'
def __call__(self, LIST_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/lists/lists"""
return self.GET('{LIST_ID}'.format(LIST_ID=LIST_ID), params, multi=multi)
def add(self, params):
"""https://developer.foursquare.com/docs/lists/add"""
return self.POST('add', params)
"""
Aspects
"""
def followers(self, LIST_ID, multi=False):
"""https://developer.foursquare.com/docs/lists/followers"""
return self.GET('{LIST_ID}/followers'.format(LIST_ID=LIST_ID), multi=multi)
def suggestphoto(self, LIST_ID, params, multi=False):
"""https://developer.foursquare.com/docs/lists/suggestphoto"""
return self.GET('{LIST_ID}/suggestphoto'.format(LIST_ID=LIST_ID), params, multi=multi)
def suggesttip(self, LIST_ID, params, multi=False):
"""https://developer.foursquare.com/docs/lists/suggesttip"""
return self.GET('{LIST_ID}/suggesttip'.format(LIST_ID=LIST_ID), params, multi=multi)
def suggestvenues(self, LIST_ID, multi=False):
"""https://developer.foursquare.com/docs/lists/suggestvenues"""
return self.GET('{LIST_ID}/suggestvenues'.format(LIST_ID=LIST_ID), multi=multi)
"""
Actions
"""
def additem(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/lists/additem"""
return self.POST('{LIST_ID}/additem'.format(LIST_ID=LIST_ID), params)
def deleteitem(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/lists/deleteitem"""
return self.POST('{LIST_ID}/deleteitem'.format(LIST_ID=LIST_ID), params)
def follow(self, LIST_ID):
"""https://developer.foursquare.com/docs/lists/follow"""
return self.POST('{LIST_ID}/follow'.format(LIST_ID=LIST_ID))
def moveitem(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/lists/moveitem"""
return self.POST('{LIST_ID}/moveitem'.format(LIST_ID=LIST_ID), params)
def share(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/lists/share"""
return self.POST('{LIST_ID}/share'.format(LIST_ID=LIST_ID), params)
def unfollow(self, LIST_ID):
"""https://developer.foursquare.com/docs/tips/unfollow"""
return self.POST('{LIST_ID}/unfollow'.format(LIST_ID=LIST_ID))
def update(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/tips/update"""
return self.POST('{LIST_ID}/update'.format(LIST_ID=LIST_ID), params)
def updateitem(self, LIST_ID, params):
"""https://developer.foursquare.com/docs/tips/updateitem"""
return self.POST('{LIST_ID}/updateitem'.format(LIST_ID=LIST_ID), params)
class Photos(_Endpoint):
"""Photo specific endpoint"""
endpoint = 'photos'
def __call__(self, PHOTO_ID, multi=False):
"""https://developer.foursquare.com/docs/photos/photos"""
return self.GET('{PHOTO_ID}'.format(PHOTO_ID=PHOTO_ID), multi=multi)
def add(self, photo_data, params, photo_content_type='image/jpeg'):
"""https://developer.foursquare.com/docs/photos/add"""
files = { 'photo': ('photo', photo_data, photo_content_type) }
return self.POST('add', data=params, files=files)
class Settings(_Endpoint):
"""Setting specific endpoint"""
endpoint = 'settings'
def __call__(self, SETTING_ID, multi=False):
"""https://developer.foursquare.com/docs/settings/settings"""
return self.GET('{SETTING_ID}'.format(SETTING_ID=SETTING_ID), multi=multi)
def all(self, multi=False):
"""https://developer.foursquare.com/docs/settings/all"""
return self.GET('all', multi=multi)
"""
Actions
"""
def set(self, SETTING_ID, params):
"""https://developer.foursquare.com/docs/settings/set"""
return self.POST('{SETTING_ID}/set'.format(SETTING_ID=SETTING_ID), params)
class Specials(_Endpoint):
"""Specials specific endpoint"""
endpoint = 'specials'
def __call__(self, SPECIAL_ID, params, multi=False):
"""https://developer.foursquare.com/docs/specials/specials"""
return self.GET('{SPECIAL_ID}'.format(SPECIAL_ID=SPECIAL_ID), params, multi=multi)
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/specials/search"""
return self.GET('search', params, multi=multi)
"""
Actions
"""
def add(self, SPECIAL_ID, params):
"""https://developer.foursquare.com/docs/specials/add"""
return self.POST('add', params)
def flag(self, SPECIAL_ID, params):
"""https://developer.foursquare.com/docs/specials/flag"""
return self.POST('{SPECIAL_ID}/flag'.format(SPECIAL_ID=SPECIAL_ID), params)
class Events(_Endpoint):
"""Events specific endpoint"""
endpoint = 'events'
def __call__(self, EVENT_ID, multi=False):
"""https://developer.foursquare.com/docs/events/events"""
return self.GET('{EVENT_ID}'.format(EVENT_ID=EVENT_ID), multi=multi)
def categories(self, multi=False):
"""https://developer.foursquare.com/docs/events/categories"""
return self.GET('categories', multi=multi)
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/events/search"""
return self.GET('search', params, multi=multi)
class Pages(_Endpoint):
"""Pages specific endpoint"""
endpoint = 'pages'
def __call__(self, USER_ID, multi=False):
"""https://developer.foursquare.com/docs/pages/pages"""
return self.GET('{USER_ID}'.format(USER_ID=USER_ID), multi=multi)
def search(self, params, multi=False):
"""https://developer.foursquare.com/docs/pages/search"""
return self.GET('search', params, multi=multi)
def venues(self, PAGE_ID, params={}, multi=False):
"""https://developer.foursquare.com/docs/pages/venues"""
return self.GET('{PAGE_ID}/venues'.format(PAGE_ID=PAGE_ID), params, multi=multi)
class Multi(_Endpoint):
"""Multi request endpoint handler"""
endpoint = 'multi'
def __len__(self):
return len(self.requester.multi_requests)
def __call__(self):
"""
Generator to process the current queue of multi's
note: This generator will yield both data and FoursquareException's
The code processing this sequence must check the yields for their type.
The exceptions should be handled by the calling code, or raised.
"""
while self.requester.multi_requests:
# Pull n requests from the multi-request queue
requests = self.requester.multi_requests[:MAX_MULTI_REQUESTS]
del(self.requester.multi_requests[:MAX_MULTI_REQUESTS])
# Process the 4sq multi request
params = {
'requests': ','.join(requests),
}
responses = self.GET(params=params)['responses']
# ... and yield out each individual response
for response in responses:
# Make sure the response was valid
try:
_raise_error_from_response(response)
yield response['response']
except FoursquareException, e:
yield e
@property
def num_required_api_calls(self):
"""Returns the expected number of API calls to process"""
return int(math.ceil(len(self.requester.multi_requests) / float(MAX_MULTI_REQUESTS)))
def _log_and_raise_exception(msg, data, cls=FoursquareException):
"""Calls log.error() then raises an exception of class cls"""
data = u'{0}'.format(data)
# We put data as a argument for log.error() so error tracking systems such
# as Sentry will properly group errors together by msg only
log.error(u'{0}: %s'.format(msg), data)
raise cls(u'{0}: {1}'.format(msg, data))
"""
Network helper functions
"""
#def _request_with_retry(url, headers={}, data=None):
def _get(url, headers={}, params=None):
    """Tries to GET data from an endpoint using retries"""
    # params are pre-encoded into a single string because foursquare does not
    # accept requests' standard quoting; see _foursquare_urlencode.
    param_string = _foursquare_urlencode(params)
    for i in xrange(NUM_REQUEST_RETRIES):
        try:
            try:
                response = requests.get(url, headers=headers, params=param_string, verify=VERIFY_SSL)
                return _process_response(response)
            except requests.exceptions.RequestException, e:
                # Transport-level failure: normalize into a FoursquareException
                # (which the outer handler may retry).
                _log_and_raise_exception('Error connecting with foursquare API', e)
        except FoursquareException, e:
            # Some errors don't bear repeating
            if e.__class__ in [InvalidAuth, ParamError, EndpointError, NotAuthorized, Deprecated]: raise
            # If we've reached our last try, re-raise
            if ((i + 1) == NUM_REQUEST_RETRIES): raise
        # Back off briefly before the next attempt.
        time.sleep(1)
def _post(url, headers={}, data=None, files=None):
    """Tries to POST data to an endpoint"""
    # NOTE(review): unlike _get, POSTs are not retried — presumably because
    # they may not be idempotent; confirm before adding retry logic here.
    try:
        response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
        return _process_response(response)
    except requests.exceptions.RequestException, e:
        _log_and_raise_exception('Error connecting with foursquare API', e)
def _process_response(response):
"""Make the request and handle exception processing"""
# Read the response as JSON
try:
data = response.json()
except ValueError:
_log_and_raise_exception('Invalid response', response.text)
# Default case, Got proper response
if response.status_code == 200:
return { 'headers': response.headers, 'data': data }
return _raise_error_from_response(data)
def _raise_error_from_response(data):
"""Processes the response data"""
# Check the meta-data for why this request failed
meta = data.get('meta')
if meta:
# Account for foursquare conflicts
# see: https://developer.foursquare.com/overview/responses
if meta.get('code') in (200, 409): return data
exc = error_types.get(meta.get('errorType'))
if exc:
raise exc(meta.get('errorDetail'))
else:
_log_and_raise_exception('Unknown error. meta', meta)
else:
_log_and_raise_exception('Response format invalid, missing meta property. data', data)
def _as_utf8(s):
    """Coerce s to a utf-8 encoded byte string (Python 2 semantics)."""
    try:
        return str(s)
    except UnicodeEncodeError:
        # Non-ASCII unicode cannot be str()'d directly; encode explicitly.
        return unicode(s).encode('utf-8')
def _foursquare_urlencode(query, doseq=0, safe_chars="&/,+"):
    """Gnarly hack because Foursquare doesn't properly handle standard url encoding"""
    # Original doc: http://docs.python.org/2/library/urllib.html#urllib.urlencode
    # Works the same way as urllib.urlencode except two differences -
    # 1. it uses `quote()` instead of `quote_plus()`
    # 2. it takes an extra parameter called `safe_chars` which is a string
    #    having the characters which should not be encoded.
    #
    # Courtesy of github.com/iambibhas
    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            # Re-raise with the original traceback (Python 2 three-arg raise).
            ty,va,tb = sys.exc_info()
            raise TypeError, "not a valid non-string sequence or mapping object", tb
    l = []
    if not doseq:
        # preserve old behavior
        for k, v in query:
            k = urllib.quote(_as_utf8(k), safe=safe_chars)
            v = urllib.quote(_as_utf8(v), safe=safe_chars)
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = urllib.quote(_as_utf8(k), safe=safe_chars)
            if isinstance(v, (str, unicode)):
                # Strings are encoded as a single value, never iterated.
                v = urllib.quote(_as_utf8(v), safe=safe_chars)
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    len(v)
                except TypeError:
                    # not a sequence
                    v = urllib.quote(_as_utf8(v), safe=safe_chars)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence, emitting one k=elt pair per element
                    for elt in v:
                        l.append(k + '=' + urllib.quote(_as_utf8(elt)))
    return '&'.join(l)
| {
"content_hash": "2169a718b4aee81f7b1a59d95f3a3379",
"timestamp": "",
"source": "github",
"line_count": 890,
"max_line_length": 122,
"avg_line_length": 41.45168539325843,
"alnum_prop": 0.5987476959774477,
"repo_name": "CzechHackathon2014/juice-my-device",
"id": "e788535c9f99a0560bcc741ff88b748fbfdb9f6b",
"size": "36960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jmd/foursquare/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9031"
},
{
"name": "JavaScript",
"bytes": "36697"
},
{
"name": "Python",
"bytes": "342440"
}
],
"symlink_target": ""
} |
"""Email pipeline for scanner summary."""
# TODO: Investigate improving so we can avoid the pylint disable.
# pylint: disable=line-too-long
import collections
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util.email_util import EmailUtil
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.notifier.pipelines import base_notification_pipeline as bnp
# pylint: enable=line-too-long
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,redundant-returns-doc
# pylint: disable=missing-param-doc
LOGGER = log_util.get_logger(__name__)
class EmailScannerSummaryPipeline(bnp.BaseNotificationPipeline):
"""Email pipeline for scanner summary."""
# TODO: See if the base pipline init() can be reused.
def __init__(self, sendgrid_key): # pylint: disable=super-init-not-called
    # The sendgrid key authenticates outbound mail sent via EmailUtil.
    self.email_util = EmailUtil(sendgrid_key)
def _compose( # pylint: disable=arguments-differ
self, all_violations, total_resources):
"""Compose the scan summary.
Build a summary of the violations and counts for the email.
resource summary:
{
RESOURCE_TYPE: {
'pluralized_resource_type': '{RESOURCE_TYPE}s'
'total': TOTAL,
'violations': {
RESOURCE_ID: NUM_VIOLATIONS,
RESOURCE_ID: NUM_VIOLATIONS,
...
}
},
...
}
Args:
all_violations: List of violations.
total_resources: A dict of the resources and their count.
Returns:
total_violations: Integer of the total violations.
resource_summaries: Dictionary of resource to violations.
{'organization':
{'pluralized_resource_type': 'Organizations',
'total': 1,
'violations': OrderedDict([('660570133860', 67)])},
'project':
{'pluralized_resource_type': 'Projects',
'total': 41,
'violations': OrderedDict([('foo1_project', 111),
('foo2_project', 222),
('foo3_project', 333)])}}
"""
resource_summaries = {}
total_violations = 0
for violation in sorted(all_violations, key=lambda v: v.resource_id):
resource_type = violation.resource_type
if resource_type not in resource_summaries:
resource_summaries[resource_type] = {
'pluralized_resource_type': resource_util.pluralize(
resource_type),
'total': total_resources[resource_type],
'violations': collections.OrderedDict()
}
# Keep track of # of violations per resource id.
if (violation.resource_id not in
resource_summaries[resource_type]['violations']):
resource_summaries[resource_type][
'violations'][violation.resource_id] = 0
resource_summaries[resource_type][
'violations'][violation.resource_id] += len(violation.members)
total_violations += len(violation.members)
return total_violations, resource_summaries
def _send( # pylint: disable=arguments-differ
self, csv_name, output_filename, now_utc, violation_errors,
total_violations, resource_summaries, email_sender,
email_recipient):
"""Send a summary email of the scan.
Args:
csv_name: The full path of the local csv filename.
output_filename: String of the output filename.
now_utc: The UTC datetime right now.
violation_errors: Iterable of violation errors.
total_violations: Integer of the total violations.
resource_summaries: Dictionary of resource to violations.
{'organization':
{'pluralized_resource_type': 'Organizations',
'total': 1,
'violations': OrderedDict([('660570133860', 67)])},
'project':
{'pluralized_resource_type': 'Projects',
'total': 41,
'violations': OrderedDict([('foo1_project', 111),
('foo2_project', 222),
('foo3_project', 333)])}}
email_sender: String of the sender of the email.
email_recipient: String of the recipient of the email.
"""
# Render the email template with values.
scan_date = now_utc.strftime('%Y %b %d, %H:%M:%S (UTC)')
email_content = EmailUtil.render_from_template(
'scanner_summary.jinja', {
'scan_date': scan_date,
'resource_summaries': resource_summaries,
'violation_errors': violation_errors,
})
# Create an attachment out of the csv file and base64 encode the
# content.
attachment = EmailUtil.create_attachment(
file_location=csv_name,
content_type='text/csv',
filename=output_filename,
disposition='attachment',
content_id='Scanner Violations'
)
scanner_subject = 'Policy Scan Complete - {} violation(s) found'.format(
total_violations)
self.email_util.send(
email_sender=email_sender,
email_recipient=email_recipient,
email_subject=scanner_subject,
email_content=email_content,
content_type='text/html',
attachment=attachment)
def run( # pylint: disable=arguments-differ
self, csv_name, output_filename, now_utc, all_violations,
total_resources, violation_errors, email_sender, email_recipient):
"""Run the email pipeline
Args:
csv_name: The full path of the local csv filename.
output_filename: String of the output filename.
now_utc: The UTC datetime right now.
all_violations: The list of violations.
total_resources: A dict of the resources and their count.
violation_errors: Iterable of violation errors.
email_sender: String of the sender of the email.
email_recipient: String of the recipient of the email.
Returns:
None
"""
total_violations, resource_summaries = self._compose(
all_violations, total_resources)
self._send(csv_name, output_filename, now_utc, violation_errors,
total_violations, resource_summaries, email_sender,
email_recipient)
| {
"content_hash": "0f57b92d4f3ca1b3f657d26093096e2c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 86,
"avg_line_length": 41.791666666666664,
"alnum_prop": 0.5625979205241418,
"repo_name": "thenenadx/forseti-security",
"id": "a61b168a9c45992b19a311d4a90897178951a797",
"size": "7595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/security/notifier/pipelines/email_scanner_summary_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5851"
},
{
"name": "Protocol Buffer",
"bytes": "10441"
},
{
"name": "Python",
"bytes": "1985604"
},
{
"name": "Shell",
"bytes": "2737"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the initial ``image_image`` table."""

    def forwards(self, orm):
        """Apply the migration: create the Image model's table."""
        # Adding model 'Image'
        db.create_table('image_image', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('tags', self.gf('tagging.fields.TagField')()),
            # uploaded is set once at creation; modified updates on every save.
            ('uploaded', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('image', ['Image'])
    def backwards(self, orm):
        """Reverse the migration: drop the Image model's table."""
        # Deleting model 'Image'
        db.delete_table('image_image')
    # Frozen model definitions South uses to build the ORM for this migration.
    models = {
        'image.image': {
            'Meta': {'object_name': 'Image'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'uploaded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['image']
| {
"content_hash": "fb2201f029a23f228a67119321c9e9de",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 119,
"avg_line_length": 45.19047619047619,
"alnum_prop": 0.5748155953635405,
"repo_name": "servee/django-servee-oldcontrib",
"id": "cfe695bbdccdea39151252ed97ab25b6c03aa282",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oldcontrib/media/image/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "18378"
},
{
"name": "Python",
"bytes": "61439"
}
],
"symlink_target": ""
} |
####################################################################
# Imports
####################################################################
import os
import pandas as pd
import sqlite3 as sqlite
from pandas.io import sql
####################################################################
# Connect to database
####################################################################
DBPATH = os.path.join(os.path.dirname(__file__), "empdata.db")
cnx = sqlite.connect(DBPATH)
cursor = cnx.cursor()
####################################################################
# Globals
####################################################################
EMPDATAPATH = os.path.join(os.path.dirname(__file__), "..","data","employment_data.csv")
RATEDATAPATH = os.path.join(os.path.dirname(__file__), "..","data","rate_data.csv")
RIDDATAPATH = os.path.join(os.path.dirname(__file__), "..","data","fips_to_rids_update2.csv")
area_offices = ["Mobile Area Office","Birmingham Area Office","Anchorage Area Office","Phoenix Area Office",\
"Little Rock Area Office","Oakland Area Office","San Diego Area Office","Denver Area Office",\
"Englewood Area Office","Bridgeport Area Office","Hartford Area Office","Wilmington Area Office",\
"Jacksonville Area Office","Tampa Area Office","Fort Lauderdale Area Office","Savannah Area Office",\
"Atlanta East Area Office","Atlanta West Area Office","Honolulu Area Office","Boise Area Office",\
"Peoria Area Office","Fairview Heights Area Office","North Aurora Area Office","Calumet City Area Office",\
"Chicago North Area Office","Indianapolis Area Office","Des Moines Area Office","Witchita Area Office",\
"Nashville Area Office","Baton Rouge Area Office","Augusta Area Office","Bangor District Office",\
"Baltimore Washington Area Office","Braintree Area Office","Springfield Area Office","Andover Area Office",\
"Lansing Area Office","Eau Claire Area Office","Jackson Area Office","Kansas City Area Office",\
"St. Louis Area Office","Billings Area Office","Omaha Area Office","Las Vegas Area Office",\
"Concord Area Office","Marlton Area Office","Hasbrouck Heights Area Office","Parsippany Area Office",\
"Avenel Area Office","El Paso Area Office","Albany Area Office","Buffalo Area Office","Tarrytown Area Office",\
"Syracuse Area Office","Manhattan Area Office","Long Island Area Office",\
"Queens District Office of the Manhattan Area Office","Raleigh Area Office","Bismarck Area Office",\
"Cincinnati Area Office","Toledo Area Office","Cleveland Area Office","Columbus Area Office",\
"Oklahoma Area Office","Portland Area Office","Harrisburg Area Office","Pittsburgh Area Office",\
"Wilkes Barre Area Office","Allentown Area Office","Erie Area Office","Philadelphia Area Office",\
"Providence Area Office","Columbia Area Office","Dallas Area Office","Lubbock Area Office",\
"Houston North Area Office","Corpus Christi Area Office","Fort Worth Area Office",\
"San Antonio Area Office","Austin Area Office","Houston South Area Office","Boston Regional Office",\
"Norfolk Area Office","Bellevue Area Office","Charleston Area Office","Madison Area Office",\
"Appleton Area Office","Milwaukee Area Office","Denver Regional Office","Puerto Rico Area Office"]
# Area Offices are more often known by their 'RID' - See lookup table in data folder.
rids = [418600,418300,1032100,936400,627100,936100,936200,830500,830600,111500,112000,317300,419700,420600,418800,\
418400,418100,418200,936300,1032500,524500,524530,521400,521700,524200,524500,523100,728100,729700,420100,\
625700,111100,112900,316100,111400,112600,134000,522900,523900,419400,728500,729300,830100,728900,936500,\
111700,213900,214500,214200,213400,627500,213100,213600,216000,215800,215000,214700,215600,420300,830300,\
522000,524700,522300,522500,627700,1032700,316700,317500,317700,317900,336000,317000,112300,418500,626300,\
627510,626600,626000,636900,625500,625400,626700,100000,316300,1032300,316400,523300,521100,523400,800000,\
215300]
industries = ["Agriculture, forestry, fishing and hunting","Mining","Utilities","Construction","Manufacturing",\
"Wholesale trade","Transportation and warehousing","Information","Finance and insurance",\
"Real estate and rental and leasing","Professional, scientific, and technical services (scope changed in 2009)",
"Management of companies and enterprises","Administrative and support and waste management and remediation services",\
"Educational services","Health care and social assistance","Arts, entertainment, and recreation",\
"Accommodation and food services","Other services, except public administration"]
####################################################################
# Open employment data csv
# Load into Pandas to wrangle
####################################################################
# Load the raw employment csv, then keep/reorder only the columns we query on.
read_emp = pd.read_csv(EMPDATAPATH, low_memory=False)
view_emp = pd.DataFrame(read_emp, columns=['index','fipstate', 'fipscty', 'naics', 'emp', 'year'])
view_emp = view_emp[['fipstate','fipscty','emp','naics','year']]
def load_emp():
'''
Puts all of the employment data into a sql table
'''
try:
sql.has_table('allemp', con=cnx)
print "Employment data already loaded."
except ValueError:
sql.to_sql(view_emp, name = 'allemp', con=cnx)
print "Employment data successfully loaded."
####################################################################
# Open injury and illness rate csv
# Load into Pandas to wrangle - just get DAWF for 100 FTW
####################################################################
# Load the BLS injury/illness rate csv and narrow it down to the national
# Days-Away-From-Work rate per 100 full-time workers, by industry and year.
read_rates = pd.read_csv(RATEDATAPATH, low_memory=False)
view_rates = pd.DataFrame(read_rates, columns=['index','area_name','case_type_code','data_type_code','naics','year','value'])
view_rates = view_rates[['area_name','case_type_code','data_type_code','naics','year','value']]
# national averages only
US_all = view_rates[view_rates.area_name == 'Private industry, All U.S.']
# just select case type 3 - Days Away from Work
DAWF = US_all[US_all.case_type_code == '3']
# just select data type 3 - Rate of injury/illness per 100 FTW
DAWF_rate = DAWF[DAWF.data_type_code == '3']
# Drop the filter columns; only naics, year and value remain.
DAWF_rate = DAWF_rate.drop('area_name', 1)
DAWF_rate = DAWF_rate.drop('case_type_code', 1)
DAWF_rate = DAWF_rate.drop('data_type_code', 1)
def load_rates():
'''
Puts all the DAWF rates into a sql table
'''
try:
sql.has_table('DAWF', con=cnx)
print "Injury rate data already loaded."
except ValueError:
sql.to_sql(DAWF_rate, name = 'DAWF', con=cnx)
print "Injury rate data successfully loaded."
####################################################################
# Open area office data csv
# Load into Pandas to wrangle
####################################################################
# Load the FIPS-to-RID lookup table mapping counties to OSHA area offices.
fips_to_rids = pd.read_csv(RIDDATAPATH)
aos = pd.DataFrame(fips_to_rids, columns=['fipstate', 'fipscty', 'county', 'state', 'area_office', 'rid','region'])
def load_aos():
'''
Puts all the Area Offices into a sql table
'''
try:
sql.has_table('AOS', con=cnx)
print "Area office data already loaded."
except ValueError:
sql.to_sql(aos, name = 'AOS', con=cnx)
print "Area office data successfully loaded."
####################################################################
# Lookup FIP by Area Office - either by RID or Area Office Name
####################################################################
def ao_to_fip(ao):
    '''
    Look up the counties covered by an area office, by office name.

    Returns a list of (fipstate, fipscty, county) tuples for every county
    under the given area office's jurisdiction.
    '''
    query = "SELECT fipstate,fipscty,county FROM AOS WHERE area_office = ?"
    cursor.execute(query, (ao,))
    return cursor.fetchall()
def rid_to_fip(rid):
    '''
    Look up the counties covered by an area office, by RID number.

    Returns a list of (fipstate, fipscty, county) tuples for every county
    under the given area office's jurisdiction.
    '''
    query = "SELECT fipstate,fipscty,county FROM AOS WHERE rid = ?"
    cursor.execute(query, (rid,))
    return cursor.fetchall()
####################################################################
# Precompute number of workers by area office
####################################################################
def create_ao_workers():
    '''
    Precomputes number of workers that each area office covers.
    Stores values in a sql table.

    Note: this only creates the (empty) 'workers' table; the rows are
    inserted later by all_aos_worker_count().
    '''
    # DDL for the per-office worker-count table.
    sql_commit = (
        "CREATE TABLE IF NOT EXISTS workers ("
        "  id INTEGER PRIMARY KEY AUTOINCREMENT,"
        "  area_office TEXT NOT NULL,"
        "  worker_count INT"
        ")"
    )
    cursor.execute(sql_commit)
    print "Worker data by area office successfully created."
def worker_count(state_code, city_code, industry, year=2013):
    '''
    Return the number of workers for a given state code, city code, year,
    and industry, looked up in the 'allemp' table.
    '''
    query = ("SELECT emp FROM allemp WHERE fipstate = ? AND fipscty = ?"
             " AND naics=? AND year=?")
    cursor.execute(query, (state_code, city_code, industry, year))
    return int(cursor.fetchone()[0])
def ao_worker_count(ao):
    '''
    Return the number of workers in a given area office (identified by RID),
    summed over all of its counties and all tracked industries.
    '''
    total = 0
    for state, city, _county in rid_to_fip(ao):
        for industry in industries:
            try:
                total += worker_count(state, city, industry)
            except TypeError:
                # No employment record for this county/industry combination;
                # treat it as zero workers.
                pass
    return total
def all_aos_worker_count():
sql_commit = "INSERT INTO workers (area_office, worker_count) VALUES (?,?)"
for ao in rids:
count = ao_worker_count(ao)
cursor.execute(sql_commit, (ao,count,))
cnx.commit()
print "Successfully committed the count for %s" % ao
if __name__ == "__main__":
    # Build the SQL tables to run the queries
    load_emp()
    load_rates()
    load_aos()
    create_ao_workers()
    all_aos_worker_count()
    # Test by printing out the employment for a given industry in a given county
    # (fipstate=1, fipscty=1 is Autauga County, AL).
    cursor.execute("SELECT emp FROM allemp WHERE fipstate = ? AND fipscty = ? AND naics=? AND year=?", (1,1,"Construction",2013))
    emp = int(cursor.fetchone()[0])
    print "In 2013, Autauga County had %d workers in the Construction industry." % emp
    # Test by printing out the injury rate for a given industry
    cursor.execute("SELECT value FROM DAWF WHERE naics = ? AND year = ?", ("Construction", 2013,))
    rate = float(cursor.fetchone()[0])
    # NOTE(review): %d truncates the fractional part of the rate -- confirm
    # whether a float format was intended here.
    print "That year, the BLS DAWF injury rate for Construction was %d." % rate
    # Test by printing out all the FIPs for a given area office
    # (RID 418600 is the Mobile Area Office).
    cursor.execute("SELECT county FROM AOS WHERE rid = ?", (418600,))
    fip = cursor.fetchall()
    print "The Mobile Area Office is responsible for the following counties:"
    for f in fip:
print f | {
"content_hash": "c22057ff9095cd3e0062705c6dd817cd",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 137,
"avg_line_length": 46.84848484848485,
"alnum_prop": 0.6289964886342635,
"repo_name": "oshadatasci/tsoan",
"id": "da852aa1637b109622b79050216bc598d493cd2c",
"size": "10836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrangle/wrangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19564"
},
{
"name": "Stata",
"bytes": "2616"
}
],
"symlink_target": ""
} |
"""
Pluggable system for TaskWarrior hooks
Adapted from https://github.com/tbabej/taskpirate
by Gabriel Alcaras
"""
import glob
import importlib.util as lib
import os
def find_hooks(file_prefix):
    """
    Find all files in subdirectories whose names start with <file_prefix>
    and return every ``hook_*`` attribute those modules define.

    Modules are loaded in sorted-path order, so the returned hooks are
    deterministic across runs.
    """
    pattern = os.path.dirname(__file__) + '/*/' + file_prefix + "*.py"
    module_paths = sorted(f for f in glob.glob(pattern) if os.path.isfile(f))

    hooks = []
    for path in module_paths:
        # Build a unique, importable module name from the file's location
        # (dots would otherwise be taken as package separators).
        name = 'pirate_{0}_{1}'.format(
            os.path.dirname(path), os.path.basename(path)).replace('.', '_')

        # Load the module directly from its file path.
        spec = lib.spec_from_file_location(name, path)
        module = lib.module_from_spec(spec)
        spec.loader.exec_module(module)

        # Collect every attribute whose name marks it as a hook.
        hooks.extend(getattr(module, attr) for attr in dir(module)
                     if attr.startswith('hook_'))

    return hooks
| {
"content_hash": "98832a0f1c8a5df65b7990d1d94e48cc",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 28.441860465116278,
"alnum_prop": 0.6255110384300899,
"repo_name": "gaalcaras/dotfiles",
"id": "5918a4302a2908acd534dacbb9c0dd0ee237c983",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task/hooks/find_hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5420"
},
{
"name": "Lua",
"bytes": "4940"
},
{
"name": "Python",
"bytes": "17845"
},
{
"name": "Shell",
"bytes": "55722"
},
{
"name": "TeX",
"bytes": "2942"
},
{
"name": "Vim script",
"bytes": "42670"
}
],
"symlink_target": ""
} |
from libpymux.session import Session
from libpymux.log import logger
from libpymux.window import Window
from pymux.panes import BashPane
import asyncio
import concurrent
class PyMuxSession(Session):
    """Session that owns a thread pool for pane I/O and tracks pane futures."""

    def __init__(self):
        super().__init__()
        # Shared executor in which every pane's blocking work runs.
        self.pane_executor = concurrent.futures.ThreadPoolExecutor(1024)
        self.pane_runners = [ ] # Futures, one per live pane.

        # Create first window/pane.
        self.create_new_window()

    def create_new_window(self):
        """Open a new window containing a single bash pane."""
        logger.info('create_new_window')
        window = Window()
        self.add_window(window)
        pane = BashPane(self.pane_executor)
        window.add_pane(pane)
        self._run_pane(window, pane)

    def split_pane(self, vsplit):
        """Split the active window with a fresh bash pane.

        Args:
            vsplit: Passed through to Window.add_pane to pick the split axis.
        """
        pane = BashPane(self.pane_executor)
        self.active_window.add_pane(pane, vsplit=vsplit)
        self._run_pane(self.active_window, pane)

    def _run_pane(self, window, pane):
        # Create coroutine which handles the creation/deletion of this pane in
        # the session.
        f = None

        @asyncio.coroutine
        def run_pane():
            yield from pane.run()
            self.pane_runners.remove(f)

            # Focus next pane in this window when this one was focussed.
            if len(window.panes) > 1 and window.active_pane == pane:
                window.focus_next()

            pane.parent.remove(pane)
            window.panes.remove(pane)

            # When this window doesn't contain any panes anymore. Remove window
            # from session.
            if len(window.panes) == 0:
                self.windows.remove(window)
                if window == self.active_window:
                    if self.windows:
                        self.active_window = self.windows[0]
                    else:
                        self.active_window = None

            # NOTE(review): `Redraw` is not imported in this module, so this
            # call raises NameError when it runs -- confirm where Redraw is
            # defined (presumably libpymux) and import it.
            self.invalidate(Redraw.All)

        # Fixed: `asyncio.async` was deprecated in Python 3.4.4 and is a
        # SyntaxError from 3.7 on (`async` became a keyword);
        # `ensure_future` is the drop-in replacement.
        f = asyncio.ensure_future(run_pane())
        self.pane_runners.append(f)

    @asyncio.coroutine
    def run(self):
        """ Run until we don't have panes anymore. """
        while True:
            runners = self.pane_runners

            if runners:
                # Wait until at least one pane finishes, then re-check.
                done, pending = yield from asyncio.wait(
                    runners, return_when=asyncio.tasks.FIRST_COMPLETED)
            else:
                break
| {
"content_hash": "0ff31e0df4fd9c4c8e4fa4dfc015e25d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 29.9875,
"alnum_prop": 0.5689870779491455,
"repo_name": "jonathanslenders/python-vterm",
"id": "46e759e48e5427bf20266078b327ccfbbbd10a02",
"size": "2399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymux/session.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "88384"
}
],
"symlink_target": ""
} |
"""Tests for object_detection.core.box_coder."""
import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.utils import test_case
class MockBoxCoder(box_coder.BoxCoder):
  """Test BoxCoder that encodes/decodes using the multiply-by-two function."""

  def code_size(self):
    # Each box is encoded as 4 values.
    return 4

  def _encode(self, boxes, anchors):
    # Anchors are ignored; encoding simply doubles the box coordinates.
    return 2.0 * boxes.get()

  def _decode(self, rel_codes, anchors):
    # Inverse of _encode: halve the codes and wrap them back in a BoxList.
    return box_list.BoxList(rel_codes / 2.0)
class BoxCoderTest(test_case.TestCase):
  """Checks that batch_decode inverts a per-image encode with MockBoxCoder."""

  def test_batch_decode(self):
    # Two batches of two boxes each; decoding the encoded boxes should
    # round-trip back to these exact values.
    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
    def graph_fn():
      mock_anchor_corners = tf.constant(
          [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
      mock_anchors = box_list.BoxList(mock_anchor_corners)
      mock_box_coder = MockBoxCoder()

      # Encode each batch element separately, then stack into a batch so
      # batch_decode has a [batch, num_anchors, code_size] input.
      encoded_boxes_list = [mock_box_coder.encode(
          box_list.BoxList(tf.constant(boxes)), mock_anchors)
                            for boxes in expected_boxes]
      encoded_boxes = tf.stack(encoded_boxes_list)
      decoded_boxes = box_coder.batch_decode(
          encoded_boxes, mock_box_coder, mock_anchors)
      return decoded_boxes

    decoded_boxes_result = self.execute(graph_fn, [])
    self.assertAllClose(expected_boxes, decoded_boxes_result)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| {
"content_hash": "1af641379b2cdaa9818ac2ebc73fb051",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 31.404255319148938,
"alnum_prop": 0.6334688346883469,
"repo_name": "tombstone/models",
"id": "52765a9d06c990c483aaf87dcba3ecfe604d7adc",
"size": "2166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "research/object_detection/core/box_coder_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
} |
from django.shortcuts import get_object_or_404
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.core.urlresolvers import reverse
from news.models import Article, Section
class NewsFeed(Feed):
    """Site-wide feed of the ten most recently published articles."""

    _site = Site.objects.get_current()
    title = "%s news feed" % _site.name
    description = "Updates and News from %s" % _site.name
    description_template = 'news/news_description.html'

    def link(self):
        """Return the URL of the article list page."""
        return reverse('article_list')

    def items(self):
        """Return the ten most recent published articles."""
        return Article.objects.published()[:10]

    def item_title(self, item):
        """Return the article title.

        Fixed: this was misspelled ``item_tilte``, so Django's feed
        framework never called it and fell back to the default item title.
        """
        return item.title

    def item_description(self, item):
        """Return the article body as HTML."""
        return item.content_html

    def item_pubdate(self, obj):
        """Return the article's publication datetime."""
        return obj.published
class NewsBySection(Feed):
    """Feed of recently published articles within a single Section."""
    _site = Site.objects.get_current()
    title = '%s new category feed' % _site.name
    def get_object(self, request, slug):
        # Resolve the Section from the URL slug; 404 if it doesn't exist.
        return get_object_or_404(Section, slug=slug)
    def link(self, obj):
        # Link to the section's own page.
        return obj.get_absolute_url()
    def description(self, obj):
        return "News recently categorized as %s" % obj.title
    def items(self, obj):
        # The ten most recent published articles in this section.
        return obj.articles.published()[:10]
| {
"content_hash": "4fb2046e538794f48e8638077c55998d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 67,
"avg_line_length": 27.90909090909091,
"alnum_prop": 0.6767100977198697,
"repo_name": "ilendl2/chrisdev-cookiecutter",
"id": "0b8d64ad30a24a3f37407d206dd1da141a170a10",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/news/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21545"
},
{
"name": "JavaScript",
"bytes": "8393"
},
{
"name": "PHP",
"bytes": "238"
},
{
"name": "Perl",
"bytes": "84"
},
{
"name": "Python",
"bytes": "168424"
},
{
"name": "Shell",
"bytes": "5146"
}
],
"symlink_target": ""
} |
import random
import time
from game.simon import Simon
import os
# Remind the operator of the external processes this script depends on.
print('Dont forget to run:')
print('roslaunch my_skeleton_markers markers.launch &')
print('python skeleton_markers_reader.py')
print('-------')
#l = kinect.skeleton_markers_reader.kinect_listener()
# NOTE(review): os.system blocks until the command exits -- confirm the
# reader script forks/returns, otherwise the game never starts.
os.system('kinect/skeleton_markers_reader.py')
# basic system parameters
sleep_time = 0.25       # polling interval (seconds)
round_duration = 5      # how long the child has to respond (seconds)
time_steps = int(float(round_duration) / sleep_time)
# initialization
simon = Simon()
simon.howie.move_to_pose(simon.howie.base_pose)
# introduction: explain the game, then wait for the child's start pose.
simon.howie.play_file('detection instruction.wav')
simon.howie.move_to_pose(simon.howie.poses['both_hands_up'])
simon.child.current_state = 'wait_for_start_pose'
while simon.child.current_state != 'start_pose_detected':
    time.sleep(sleep_time)
simon.howie.play_file('intro.wav')
simon.child.current_state = 'wait_for_yes_pose'
while simon.child.current_state != 'yes_pose_detected':
    time.sleep(sleep_time)
# starting to play
number_of_rounds = 10
for a in range(number_of_rounds):
    # select a random action, and random hertzel says
    simon.robot_performs_action()
    simon.child.current_state = 'wait_for_current_pose'
    # Poll the kinect for up to round_duration seconds.
    for t in range(time_steps):
        time.sleep(sleep_time)
        # checking for updates from kinect
        if simon.child.current_state == 'received_pose':
            if simon.pose_selected in simon.get_pose_detected_names():
                # child performed the pose required
                if simon.hertzel_says:
                    simon.howie.play_file(random.choice(simon.howie.utterances['correct_pose']))
                    print('correct pose')
                else:
                    simon.howie.play_file(random.choice(simon.howie.utterances['got you']))
                    print('Got you!')
                break
            else:
                print('wrong pose')
                simon.child.current_state = 'wait_for_current_pose'
    else:
        # BUGFIX: this feedback previously ran after *every* round, even when
        # the child had already responded and the poll loop exited via
        # `break`, producing contradictory messages.  With for/else it only
        # runs when the round timed out without a matching pose.
        if simon.hertzel_says:
            simon.howie.play_file(random.choice(simon.howie.utterances['didnt get it']))
            print('I said Simon says, you didnt get it')
        else:
            simon.howie.play_file(random.choice(simon.howie.utterances['good job']))
            print('goob job, didnt fool you')
simon.howie.play_file('bye.wav') | {
"content_hash": "dac3361bf4cce5041d15f2fd2c35573c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 96,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6489273927392739,
"repo_name": "CuriosityLabTAU/physicial_curiosity",
"id": "73e45b1b94b4d769893a7d1106d3d741187d8d35",
"size": "2424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main_simon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59982"
}
],
"symlink_target": ""
} |
from collections import Counter
import sys
import time
import ray
@ray.remote
def gethostname(x):
    """Remote task: sleep briefly, then append this node's hostname to x.

    Args:
        x: Tuple of hostnames accumulated so far.

    Returns:
        x with the executing node's hostname appended.
    """
    import platform
    import time
    time.sleep(0.01)
    return x + (platform.node(), )
def wait_for_nodes(expected):
    """Block until at least `expected` nodes have joined the Ray cluster,
    printing progress once per second while waiting."""
    while True:
        joined = len(ray.nodes())
        if joined >= expected:
            return
        print("{} nodes have joined so far, waiting for {} more.".format(
            joined, expected - joined))
        sys.stdout.flush()
        time.sleep(1)
def main():
    """Exercise cross-node object transfer once the cluster is up."""
    wait_for_nodes(4)

    # Check that objects can be transferred from each node to each other node.
    for i in range(10):
        print("Iteration {}".format(i))
        # Chain two remote calls so the intermediate result must move
        # between the nodes that execute each stage.
        results = [
            gethostname.remote(gethostname.remote(())) for _ in range(100)
        ]
        # Tally which (hostname, hostname) pairs the tasks ran on.
        print(Counter(ray.get(results)))
        sys.stdout.flush()

    print("Success!")
    sys.stdout.flush()
    # Keep the driver alive briefly before exiting -- presumably so output
    # and cluster state settle; confirm whether this delay is still needed.
    time.sleep(20)
if __name__ == "__main__":
    # Connect to an already-running Ray head node on this host.
    ray.init(address="localhost:6379")
    main()
| {
"content_hash": "8637a9a7afccca54af4eeb694deea365",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 22.76595744680851,
"alnum_prop": 0.5803738317757009,
"repo_name": "pcmoritz/ray-1",
"id": "6275ec87efb580e33a6b59e24be7f155ea7aaeec",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/yarn/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
import os
class ProgressBar:
    """Render a textual progress bar on a terminal via carriage-return updates.

    Typical output: ``[=====>-----] : 50.0% (5/10)``.  Call :meth:`begin`
    once before reporting progress, then :meth:`add_progress` as tasks
    complete; a final newline is printed when all tasks are done.
    """

    def __init__(self, task_number, bar_opening="[", bar_ending="]", empty_char="-", filled_char="=",
                 update_rate=0, percent_precision=1, display_percent=True, display_absolute_progress=True, bar_length=0,
                 enable_front_char=False, front_char=">", unit=''):
        """Configure the bar.

        Args:
            task_number: Total number of tasks (the 100% mark).
            bar_opening / bar_ending: Strings drawn around the bar.
            empty_char / filled_char: Characters for the unfilled / filled part.
            update_rate: Redraw every N increments; 0 means redraw whenever
                the rendered bar or percentage would visibly change.
            percent_precision: Decimal places shown for the percentage.
            display_percent: Whether to show the percentage.
            display_absolute_progress: Whether to show "done/total".
            bar_length: Fixed bar width; 0 means fit the terminal width.
            enable_front_char / front_char: Optional head character (e.g. ">").
            unit: Optional unit label appended to the absolute progress.
        """
        self.__task_number = task_number
        self.__bar_opening = bar_opening
        self.__bar_ending = bar_ending
        self.__empty_char = empty_char
        self.__filled_char = filled_char
        self.__update_rate = update_rate
        self.__percent_precision = str(percent_precision)
        self.__display_percent = display_percent
        self.__display_absolute_progress = display_absolute_progress
        self.__unit = ' ' + unit if len(unit) > 0 else ''
        if bar_length > 0:
            # Never let an explicit length overflow the terminal width.
            self.__bar_length = min(bar_length, self.__compute_max_length())
        else:
            self.__bar_length = self.__compute_max_length()
        self.__enable_front_char = enable_front_char
        self.__front_char = front_char

    def begin(self):
        """Reset progress to zero and print the initial (empty) bar."""
        self.__update_count = 0
        self.__current_length = 0
        self.__current_progress = 0
        print(self.__get_bar_string(), end='\r')

    def add_progress(self, inc=1):
        """Advance progress by ``inc`` tasks (at least 1) and redraw if needed.

        Progress is clamped to ``task_number``; the final redraw ends with a
        newline instead of a carriage return.
        """
        increment = inc if inc > 0 else 1
        if self.__current_progress < self.__task_number:
            prev_percent = self.__get_percent_progress()
            self.__current_progress = min(self.__task_number, self.__current_progress + increment)
            self.__update_count += increment
            new_length = int(self.__get_progress() * self.__bar_length)
            if self.__update_rate > 0:
                # Fixed-rate mode: redraw every update_rate increments.
                need_to_update = self.__update_count >= self.__update_rate
            else:
                # Auto mode: redraw only when the output would actually change.
                need_to_update = new_length > self.__current_length or prev_percent != self.__get_percent_progress()
            if need_to_update or self.__current_progress == self.__task_number:
                self.__update_count = 0
                self.__current_length = new_length
                end_char = "\r" if self.__current_progress < self.__task_number else "\n"
                print(self.__get_bar_string(), end=end_char)

    def __get_progress(self):
        # Fraction completed, in [0, 1].
        return float(float(self.__current_progress) / float(self.__task_number))

    def __get_percent_progress(self):
        # Percentage string at the configured precision, e.g. "40.0%".
        format_string = "{0:." + self.__percent_precision + "f}"
        return format_string.format(self.__get_progress() * 100) + "%"

    def __get_progress_fraction(self):
        # Absolute progress string, e.g. "4/10".
        return str(self.__current_progress) + "/" + str(self.__task_number)

    def __get_bar_string(self):
        """Assemble the full line: bar, optional percent and absolute parts."""
        diff = self.__bar_length - self.__current_length - (1 if self.__enable_front_char else 0)
        progresses = ""
        if self.__display_percent:
            progresses += " : " + self.__get_percent_progress()
            progresses += " (" + self.__get_progress_fraction() + self.__unit + ")" if self.__display_absolute_progress else ""
        elif self.__display_absolute_progress:
            progresses += " : " + self.__get_progress_fraction() + self.__unit
        # The head character is dropped once the bar is complete.
        front_char = self.__front_char if (
            self.__enable_front_char and self.__current_progress < self.__task_number) else ""
        return ( self.__bar_opening + self.__current_length * self.__filled_char + front_char +
                diff * self.__empty_char + self.__bar_ending + progresses )

    def __compute_max_length(self):
        """Compute the widest bar that fits the terminal with the decorations.

        Falls back to an 80-column terminal when the size cannot be queried.
        """
        sz = None
        try:
            sz = os.get_terminal_size().columns
        except OSError:
            # Fixed: was a bare `except:` that swallowed every exception;
            # get_terminal_size raises OSError when stdout is not a tty.
            pass
        if sz is None:
            sz = 80
        max_length = int(sz)
        # Subtract the width of every decoration that surrounds the bar.
        max_length -= (len(str(self.__task_number)) * 2 + 1) if self.__display_absolute_progress else 0
        max_length -= len(self.__bar_opening)
        max_length -= len(self.__bar_ending)
        max_length -= (5 + int(self.__percent_precision)) if self.__display_percent else 0
        max_length -= 1 if int(self.__percent_precision) > 0 else 0
        max_length -= 3 if (self.__display_percent and self.__display_absolute_progress) else 0
        max_length -= 2 if (self.__display_percent or self.__display_absolute_progress) else 0
        max_length -= len(self.__unit)
        return max_length - 1
| {
"content_hash": "be0afea882cfec1d10dc854d2c353840",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 127,
"avg_line_length": 45.10526315789474,
"alnum_prop": 0.5750291715285881,
"repo_name": "Rabyss/PyProgressBar",
"id": "eaac547b0a82035489d7d52edcda447b80d8e412",
"size": "4307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "progress_bar.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6409"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "663b6e1d6653909f6520d91222bab851",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 170,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.7111111111111111,
"repo_name": "antoinecarme/pyaf",
"id": "bb7028f70305cbe9ffd08d3d3a3c961f773e1cba",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_MovingAverage/cycle_30/ar_12/test_artificial_128_Difference_MovingAverage_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, TaskInstance
from airflow.settings import Session
from airflow.www import app as application
class ApiExperimentalTests(unittest.TestCase):
    """Integration tests for Airflow's experimental REST API endpoints.

    Each test drives a Flask test client against the example DAGs shipped
    with Airflow (e.g. ``example_bash_operator``).
    """

    def setUp(self):
        # Load the test configuration, build a test client, and wipe the
        # DagRun / TaskInstance tables so every test starts from a clean DB.
        configuration.load_test_config()
        app = application.create_app(testing=True)
        self.app = app.test_client()
        session = Session()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
        session.commit()
        session.close()

    def test_task_info(self):
        """GET task info: 200 with task fields for a real dag/task, 404 otherwise."""
        url_template = '/api/experimental/dags/{}/tasks/{}'
        # Known dag + known task: JSON payload includes task attrs ("email").
        response = self.app.get(
            url_template.format('example_bash_operator', 'runme_0')
        )
        self.assertIn('"email"', response.data.decode('utf-8'))
        self.assertNotIn('error', response.data.decode('utf-8'))
        self.assertEqual(200, response.status_code)
        # Known dag, unknown task.
        response = self.app.get(
            url_template.format('example_bash_operator', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)
        # Unknown dag.
        response = self.app.get(
            url_template.format('DNE', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)

    def test_trigger_dag(self):
        """POST dag_runs: 200 for an existing dag, 404 for a missing one."""
        url_template = '/api/experimental/dags/{}/dag_runs'
        # run_id is timestamped so reruns of this test don't collide.
        response = self.app.post(
            url_template.format('example_bash_operator'),
            data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
            content_type="application/json"
        )
        self.assertEqual(200, response.status_code)
        response = self.app.post(
            url_template.format('does_not_exist_dag'),
            data=json.dumps(dict()),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)

    def test_trigger_dag_for_date(self):
        """POST dag_runs with an explicit execution_date and verify the run exists."""
        url_template = '/api/experimental/dags/{}/dag_runs'
        dag_id = 'example_bash_operator'
        # Use a future date (truncated to the hour) to avoid clashing with
        # any scheduler-created runs.
        hour_from_now = datetime.now() + timedelta(hours=1)
        execution_date = datetime(hour_from_now.year,
                                  hour_from_now.month,
                                  hour_from_now.day,
                                  hour_from_now.hour)
        datetime_string = execution_date.isoformat()

        # Test Correct execution
        response = self.app.post(
            url_template.format(dag_id),
            data=json.dumps(dict(execution_date=datetime_string)),
            content_type="application/json"
        )
        self.assertEqual(200, response.status_code)
        # The run must be findable through the model layer as well.
        dagbag = DagBag()
        dag = dagbag.get_dag(dag_id)
        dag_run = dag.get_dagrun(execution_date)
        self.assertTrue(dag_run,
                        'Dag Run not found for execution date {}'
                        .format(execution_date))

        # Test error for nonexistent dag
        response = self.app.post(
            url_template.format('does_not_exist_dag'),
            data=json.dumps(dict(execution_date=execution_date.isoformat())),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)

        # Test error for bad datetime format
        response = self.app.post(
            url_template.format(dag_id),
            data=json.dumps(dict(execution_date='not_a_datetime')),
            content_type="application/json"
        )
        self.assertEqual(400, response.status_code)

    def test_task_instance_info(self):
        """GET task-instance info for a triggered run; cover all error paths."""
        url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
        dag_id = 'example_bash_operator'
        task_id = 'also_run_this'
        # Microseconds stripped: the URL round-trips through isoformat().
        execution_date = datetime.now().replace(microsecond=0)
        datetime_string = quote_plus(execution_date.isoformat())
        wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())

        # Create DagRun
        trigger_dag(dag_id=dag_id,
                    run_id='test_task_instance_info_run',
                    execution_date=execution_date)

        # Test Correct execution
        response = self.app.get(
            url_template.format(dag_id, datetime_string, task_id)
        )
        self.assertEqual(200, response.status_code)
        self.assertIn('state', response.data.decode('utf-8'))
        self.assertNotIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent dag
        response = self.app.get(
            url_template.format('does_not_exist_dag', datetime_string, task_id),
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent task
        response = self.app.get(
            url_template.format(dag_id, datetime_string, 'does_not_exist_task')
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent dag run (wrong execution_date)
        response = self.app.get(
            url_template.format(dag_id, wrong_datetime_string, task_id)
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for bad datetime format
        response = self.app.get(
            url_template.format(dag_id, 'not_a_datetime', task_id)
        )
        self.assertEqual(400, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))
| {
"content_hash": "0bf6259cb82d8c656db96b7a74415d57",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 85,
"avg_line_length": 37.93421052631579,
"alnum_prop": 0.6042317030870621,
"repo_name": "hamedhsn/incubator-airflow",
"id": "dacee321ece668aee52a65354e3a052f365ab836",
"size": "6333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/www/api/experimental/test_endpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57001"
},
{
"name": "HTML",
"bytes": "145790"
},
{
"name": "JavaScript",
"bytes": "1364376"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2069853"
},
{
"name": "Shell",
"bytes": "20906"
}
],
"symlink_target": ""
} |
import vim
from ycm import vimsupport
from ycm.completers.completer import Completer
# User-facing error messages posted to Vim when the configured omnifunc
# misbehaves (see OmniCompleter.CandidatesForQueryAsyncInner).
OMNIFUNC_RETURNED_BAD_VALUE = 'Omnifunc returned bad value to YCM!'
OMNIFUNC_NOT_LIST = ( 'Omnifunc did not return a list or a dict with a "words" '
                      ' list when expected.' )
class OmniCompleter( Completer ):
  """Completer that proxies the buffer's native Vim 'omnifunc'.

  When g:ycm_cache_omnifunc is set, results go through the base Completer's
  caching/filtering machinery; otherwise the omnifunc is re-invoked directly
  on every request via the *Inner methods.
  """

  def __init__( self ):
    super( OmniCompleter, self ).__init__()
    # Name of the buffer's omnifunc; refreshed in OnFileReadyToParse.
    self.omnifunc = None
    # Candidates from the last omnifunc invocation; None when it failed
    # or declined to complete.
    self.stored_candidates = None

  def SupportedFiletypes( self ):
    # Empty list: this completer is not restricted to particular filetypes.
    return []

  def ShouldUseCache( self ):
    return vimsupport.GetBoolValue( "g:ycm_cache_omnifunc" )

  def ShouldUseNow( self, start_column ):
    if self.ShouldUseCache():
      return super( OmniCompleter, self ).ShouldUseNow( start_column )
    return self.ShouldUseNowInner( start_column )

  def ShouldUseNowInner( self, start_column ):
    # No omnifunc configured for this buffer -> nothing to offer.
    if not self.omnifunc:
      return False
    return super( OmniCompleter, self ).ShouldUseNowInner( start_column )

  def CandidatesForQueryAsync( self, query, unused_start_column ):
    if self.ShouldUseCache():
      return super( OmniCompleter, self ).CandidatesForQueryAsync(
          query, unused_start_column )
    else:
      return self.CandidatesForQueryAsyncInner( query, unused_start_column )

  def CandidatesForQueryAsyncInner( self, query, unused_start_column ):
    """Invoke the omnifunc using Vim's two-call protocol and stash the results.

    First call (findstart=1) returns the completion start column, where a
    negative value means "no completions here"; the second call (findstart=0)
    returns either a list of candidates or a dict with a 'words' list.
    """
    if not self.omnifunc:
      self.stored_candidates = None
      return
    try:
      return_value = int( vim.eval( self.omnifunc + '(1,"")' ) )
      if return_value < 0:
        # Omnifunc declined to complete at this position.
        self.stored_candidates = None
        return
      omnifunc_call = [ self.omnifunc,
                        "(0,'",
                        vimsupport.EscapeForVim( query ),
                        "')" ]
      items = vim.eval( ''.join( omnifunc_call ) )
      if 'words' in items:
        items = items['words']
      if not hasattr( items, '__iter__' ):
        raise TypeError( OMNIFUNC_NOT_LIST )
      # Drop empty/falsy entries. NOTE(review): filter() yields a list only
      # on Python 2; this module appears to predate Python 3 support.
      self.stored_candidates = filter( bool, items )
    except (TypeError, ValueError) as error:
      # Surface malformed omnifunc output to the user instead of crashing.
      vimsupport.PostVimMessage(
          OMNIFUNC_RETURNED_BAD_VALUE + ' ' + str( error ) )
      self.stored_candidates = None
      return

  def AsyncCandidateRequestReadyInner( self ):
    # Omnifunc evaluation is synchronous, so results are always "ready".
    return True

  def OnFileReadyToParse( self ):
    # Re-read &omnifunc so buffer-local changes are picked up.
    self.omnifunc = vim.eval( '&omnifunc' )

  def CandidatesFromStoredRequest( self ):
    if self.ShouldUseCache():
      return super( OmniCompleter, self ).CandidatesFromStoredRequest()
    else:
      return self.CandidatesFromStoredRequestInner()

  def CandidatesFromStoredRequestInner( self ):
    return self.stored_candidates if self.stored_candidates else []
| {
"content_hash": "c8a60f111e24521b2dbb32a6b3041c8e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 28.172043010752688,
"alnum_prop": 0.6526717557251909,
"repo_name": "TaDaa/.vim",
"id": "66b6fff69491d68520e48baa0b289f818a201de5",
"size": "3400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dependencies/windows/ycm/ycm/python/ycm/completers/all/omni_completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "54093"
},
{
"name": "Python",
"bytes": "14144"
},
{
"name": "Shell",
"bytes": "122"
},
{
"name": "Vim script",
"bytes": "24769"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class DefFunctionTest(test.TestCase):
  """Tests for `tf.function(experimental_compile=...)` XLA JIT behavior.

  Several tests are skipped on ROCm builds, where XLA support is not yet
  enabled.
  """

  def testAutoclusteringWithTfFunction(self):
    """A compiled inner function appears as an _XlaRun op in the optimized graph."""

    @def_function.function(experimental_compile=False)
    def outer(a, b, c):
      return a * inner(b, c) + c

    @def_function.function(experimental_compile=True)
    def inner(b, c):
      return b + c * b

    i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
    i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
    i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])

    with context.collect_graphs(optimized=True) as graphs:
      outer(i1, i2, i3)

    self.assertIn('_XlaRun', [n.op for n in graphs[0].node])

  def testBasic(self):
    """Compiled and uncompiled versions of the same fn agree."""

    def fn(x, a):
      return x + a

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))

  def testBasicInt32(self):
    """Compilation also works for int32 inputs."""

    def fn(x, a):
      return x + a

    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))

  def testDerivative(self):
    """Gradients flow through a compiled function; derivatives inherit the
    must-compile attribute."""
    if test.is_built_with_rocm():
      return

    def fn(x, a):
      return 2 * x + a

    xla_func = def_function.function(fn, experimental_compile=True)

    with backprop.GradientTape() as tape:
      inputs = constant_op.constant([1., 2., 2., 3., 3.])
      tape.watch(inputs)
      outputs = xla_func(inputs, 1)

    self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))

    # pylint: disable=protected-access
    (forward, backward) = xla_func.get_concrete_function(
        inputs, 1)._delayed_rewrite_functions.forward_backward()

    # Check that the must-compile attribute gets correctly propagated to the
    # created derivatives.
    self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
    self.assertTrue(forward.definition.attr['_XlaMustCompile'])

  # Calling function with experimental_compile=True from
  # experimental_compile=False should compile the inner func.
  def testNestedCall(self):

    def fn(x, a):
      return x + a

    xla_func = def_function.function(fn, experimental_compile=True)

    def fn2(x, a):
      return xla_func(x, a)

    func = def_function.function(fn2, experimental_compile=False)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))

  def testNestedCallUnsupportedOps(self):
    """A compiled inner fn containing an XLA-incompatible op fails even when
    called from an uncompiled outer fn."""

    def fn(x):
      return array_ops.unique(x).y

    xla_func = def_function.function(fn, experimental_compile=True)

    def fn2(x):
      return xla_func(x)

    func = def_function.function(fn2, experimental_compile=False)
    inputs = constant_op.constant([1, 2, 2, 3, 3])
    if not test.is_built_with_rocm():
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   'not compilable'):
        func(inputs)

  def testUnsupportedOps(self):
    """XLA-incompatible ops run fine uncompiled but raise when compiled."""

    def fn(x):
      return array_ops.unique(x).y  # Unique is not supported by XLA

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    self.assertAllClose([1, 2, 3], func(inputs))
    with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
      xla_func(inputs)

  def testFunctionGradient(self):
    """Gradients w.r.t. a captured variable agree between compiled and not."""
    v = resource_variable_ops.ResourceVariable(2.0)

    def fn(x):
      return v * x

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    def run_and_check(test_func):
      x = constant_op.constant(3.0)
      with backprop.GradientTape() as tape:
        y = test_func(x)
      dy = tape.gradient(y, v)

      self.assertAllClose(6.0, y)
      self.assertAllClose(3.0, dy)

    run_and_check(func)
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      run_and_check(xla_func)

  def testControlFlow(self):
    """while_loop + cond inside a compiled fn, forward and backward."""

    @def_function.function(experimental_compile=True)
    def f(x):
      # Compiled functions must execute inside an XLA context.
      assert control_flow_util.GraphOrParentsInXlaContext(
          ops.get_default_graph())
      x = ops.convert_to_tensor(x)

      def body(i, a):
        return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
                                            lambda: a + 3)

      return control_flow_ops.while_loop(
          lambda i, *_: i < 10,
          body, (constant_op.constant(0), constant_op.constant(3.)),
          maximum_iterations=10)[1]

    @def_function.function(experimental_compile=True)
    def g(x):
      x = ops.convert_to_tensor(x)
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = f(x)
      return y, tape.gradient(y, x)

    self.assertAllClose(40.0, f(2.0))
    self.assertAllClose([40.0, 28.0], g(2.0))

  def testMethodCompilation(self):
    """experimental_compile works when decorating an instance method."""
    if test.is_built_with_rocm():
      return

    class C(object):

      @def_function.function(experimental_compile=True)
      def f1(self, x, a):
        return x + a

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    c = C()
    self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))

  def testMethodCompilationUnsupportedFunc(self):
    """A compiled method containing an XLA-incompatible op raises."""
    if test.is_built_with_rocm():
      return

    class C(object):

      @def_function.function(experimental_compile=True)
      def f1(self, x):
        return array_ops.unique(x).y

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    c = C()
    with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
      c.f1(inputs)
if __name__ == '__main__':
  # These tests rely on eager execution being enabled before test.main().
  ops.enable_eager_execution()
  test.main()
| {
"content_hash": "76addc534983987496c9dabe33eadfcb",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 80,
"avg_line_length": 30.43946188340807,
"alnum_prop": 0.6526222746022392,
"repo_name": "renyi533/tensorflow",
"id": "16d57ef36da9ef2cd1b3b68ef08cd9f6765167e1",
"size": "7478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/def_function_xla_jit_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
@version: python.3.6
@author: zhangjiaheng
@software: PyCharm
@time: 2017/9/20 20:48
"""
from models import myunit,function
from test_case.page_obj import landlord_serach_page,landlord_nav_page,login_page
import unittest
from time import sleep
class TestLandlordSerach(myunit.MyTest):
    '''Landlord-side order search tests (class name typo "Serach" kept as-is).'''

    def test_date_serach(self):
        '''Search orders by check-in date range and capture a screenshot.'''
        login_page.LoginPage(self.driver).login()
        sleep(3)
        landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
        landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
        sleep(4)
        po = landlord_serach_page.LandlordSerachPage(self.driver)
        po.beginCheckInDay()
        po.endCheckInDay()
        sleep(2)
        po.serach()
        sleep(3)
        function.insert_img(self.driver,"date_serach.png")

    def test_orderid_or_phone(self):
        '''Search orders by phone number or order ID and capture screenshots.'''
        # NOTE(review): login / close-WeChat steps are commented out,
        # presumably because this test reuses the session left by
        # test_date_serach -- confirm the intended test ordering.
        # login_page.LoginPage(self.driver).login()
        # sleep(3)
        landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
        # landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
        # sleep(4)
        po = landlord_serach_page.LandlordSerachPage(self.driver)
        # NOTE(review): `list` shadows the builtin; the trailing space in
        # "853519722 " looks accidental -- verify it is intentional.
        list = ["18701016443","853519722 "]
        for orderOrMoblie in list:
            po.orderOrMoblie(orderOrMoblie)
            sleep(2)
            po.serach()
            function.insert_img(self.driver,orderOrMoblie+".png")
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "70893a3086f1e9225aed85cd186c3d91",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 31.04255319148936,
"alnum_prop": 0.6360520904729267,
"repo_name": "18701016443/mayi",
"id": "bbc637f5a8f5e01763654636330ec921e6bf24da",
"size": "1534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mayi/test_case/test_landlord_serach.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "654975"
},
{
"name": "Python",
"bytes": "162019"
}
],
"symlink_target": ""
} |
from flask import request
class WebSocket(object):
    """Flask extension skeleton exposing a WebSocket endpoint and handler decorators.

    Supports both direct construction (``WebSocket(app)``) and the Flask
    app-factory pattern (``ws = WebSocket(); ws.init_app(app)``).
    Handler registration and message dispatch are not implemented yet; the
    ``on_*`` decorators currently return the wrapped function unchanged.
    """

    def __init__(self, app=None, prefix='/ws'):
        # Bugfix: `prefix` was never stored (init_app reads self.prefix ->
        # AttributeError), and init_app was invoked even when app was None,
        # which crashed on None.add_url_rule.
        self.app = app
        self.prefix = prefix
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register the WebSocket endpoint on *app* under ``self.prefix``."""
        app.add_url_rule(self.prefix, 'ws', self._handler)

    def on_message(self, msg):
        """Decorator: register *fn* as the handler for message type *msg*.

        Bugfix: originally returned the undefined name ``f`` (NameError);
        now returns a working (stub) decorator.
        """
        def decorator(fn):
            # TODO: store fn in a dispatch table keyed by msg.
            return fn
        return decorator

    def on_open(self):
        """Decorator: register *fn* as the connection-open handler (stub)."""
        def decorator(fn):
            # TODO: store fn as the open handler.
            return fn
        return decorator

    def on_close(self):
        """Decorator: register *fn* as the connection-close handler (stub)."""
        def decorator(fn):
            # TODO: store fn as the close handler.
            return fn
        return decorator

    def on_error(self):
        """Decorator: register *fn* as the error handler (stub)."""
        def decorator(fn):
            # TODO: store fn as the error handler.
            return fn
        return decorator

    def send(self):
        """Send a frame to the connected client -- not implemented yet."""
        pass

    def _handler(self):
        """View function bound to ``self.prefix``; WebSocket upgrade not implemented."""
        pass
"content_hash": "1b6ad2e5b80b84a906b546f39cddd250",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 58,
"avg_line_length": 18.097560975609756,
"alnum_prop": 0.48787061994609165,
"repo_name": "bulbulpaul/flask-websocket",
"id": "9e84fb114244f08c094c1affa500791160d2c91c",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "websocket/flask_websocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3075"
}
],
"symlink_target": ""
} |
from .app_session import APP_URL, ADMIN_CREDENTIALS, ROOT_CREDENTIALS, get_requests_app_cookies | {
"content_hash": "8d18f291e07b765bf36be05bc7e6f493",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 95,
"avg_line_length": 95,
"alnum_prop": 0.8210526315789474,
"repo_name": "KorolevskyMax/TestFrameworkTemplate",
"id": "eff8b0ee2cd269f4aa5e9f5f6cde6024d8a6b05e",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/app_helpers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "575"
},
{
"name": "Python",
"bytes": "17024"
}
],
"symlink_target": ""
} |
import json
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.functions import udf
from pyspark.sql.types import *
import numpy as np
import datetime
class ConvertUtils(object):
    """Helpers converting measurement values between a generic (stringly-typed)
    representation and per-datatype columns in a pandas DataFrame.

    NOTE(review): this module uses the Python 2 ``long`` builtin and the
    pandas ``DataFrame.set_value`` API (removed in pandas 1.0) -- it targets
    Python 2 with an old pandas; confirm the pinned environment before
    modernizing.
    """

    @staticmethod
    def convert_meas_value(df, destination):
        """Split generic meas_value / limit columns into typed columns.

        For destination == "measurements_cleansed", each row's
        ``meas_datatype`` ('double', 'long', 'string', or anything else ->
        treated as double) decides which typed column (_d/_l/_s) receives the
        parsed value; values that fail numeric parsing fall back to the
        string column. Limits become long columns only when the datatype is
        'long' AND both limits are integral; otherwise both go to doubles.
        For any other destination the frame is returned unchanged.
        """
        if destination == "measurements_cleansed":
            # Pre-create the typed target columns.
            df['meas_value_d'] = np.nan
            df['meas_value_l'] = np.nan
            df['meas_value_s'] = ""
            df['meas_upper_limit_d'] = np.nan
            df['meas_upper_limit_l'] = np.nan
            df['meas_lower_limit_d'] = np.nan
            df['meas_lower_limit_l'] = np.nan
            for index, row in df.iterrows():
                if row['meas_datatype'] == "double":
                    ConvertUtils.try_set_cell_with_float_value_if_not_use_string(
                        df, index, 'meas_value_d', row, 'meas_value', 'meas_value_s')
                elif row['meas_datatype'] == "long":
                    ConvertUtils.try_set_cell_with_long_value_if_not_use_string(
                        df, index, 'meas_value_l', row, 'meas_value', 'meas_value_s')
                elif row['meas_datatype'] == "string":
                    ConvertUtils.try_set_cell_with_string_value(
                        df, index, 'meas_value_s', row, 'meas_value')
                else:
                    # Unknown datatype: treat the value as a double by default.
                    ConvertUtils.try_set_cell_with_float_value_if_not_use_string(
                        df, index, 'meas_value_d', row, 'meas_value', 'meas_value_s')
                # Limits are stored as longs only when the datatype is 'long'
                # and BOTH limits are integral; otherwise fall back to doubles.
                if row['meas_datatype'] == "long" and ConvertUtils.is_long_number(
                        row['meas_upper_limit']) and ConvertUtils.is_long_number(
                        row['meas_lower_limit']):
                    ConvertUtils.try_set_cell_with_long_value(
                        df, index, 'meas_upper_limit_l', row, 'meas_upper_limit')
                    ConvertUtils.try_set_cell_with_long_value(
                        df, index, 'meas_lower_limit_l', row, 'meas_lower_limit')
                else:
                    ConvertUtils.try_set_cell_with_float_value(
                        df, index, 'meas_upper_limit_d', row, 'meas_upper_limit')
                    ConvertUtils.try_set_cell_with_float_value(
                        df, index, 'meas_lower_limit_d', row, 'meas_lower_limit')
            # Replace the generic columns with the double-typed results.
            df = df.drop('meas_value', 1)
            df = df.drop('meas_upper_limit', 1)
            df = df.drop('meas_lower_limit', 1)
            df['meas_value'] = df['meas_value_d']
            df['meas_upper_limit'] = df['meas_upper_limit_d']
            df['meas_lower_limit'] = df['meas_lower_limit_d']
            df = df.drop('meas_upper_limit_d', 1)
            df = df.drop('meas_lower_limit_d', 1)
            df = df.drop('meas_value_d', 1)
            # NaN -> None so downstream consumers see Python nulls.
            return df.where(df.notnull(), None)
        else:
            return df

    @staticmethod
    def is_long_number(val):
        """True when *val* parses as a number with no fractional part.

        NOTE(review): ``long`` is Python 2 only; under Python 3 this raises
        NameError, which the broad except turns into a constant False.
        """
        try:
            return long(float(val)) == float(val)
        except BaseException:
            return False

    @staticmethod
    def try_set_cell_with_float_value_if_not_use_string(
            df, index, column_name, row, column_value_name, column_name_str):
        # Parse as float; on failure store the raw value in the string column.
        try:
            value = float(row[column_value_name])
            df.set_value(index, column_name, value)
        except BaseException:
            ConvertUtils.try_set_cell_with_string_value(
                df, index, column_name_str, row, column_value_name)

    @staticmethod
    def try_set_cell_with_float_value(
            df, index, column_name, row, column_value_name):
        # Parse as float; on failure store NaN.
        try:
            value = float(row[column_value_name])
            df.set_value(index, column_name, value)
        except BaseException:
            df.set_value(index, column_name, np.nan)

    @staticmethod
    def try_set_cell_with_long_value(
            df, index, column_name, row, column_value_name):
        # Parse as long (via float, so "2.0" works); on failure store NaN.
        try:
            value = long(float(row[column_value_name]))
            df.set_value(index, column_name, value)
        except BaseException:
            df.set_value(index, column_name, np.nan)

    @staticmethod
    def try_set_cell_with_long_value_if_not_use_string(
            df, index, column_name, row, column_value_name, column_name_str):
        # Parse as long; on failure store the raw value in the string column.
        try:
            value = long(float(row[column_value_name]))
            df.set_value(index, column_name, value)
        except BaseException:
            ConvertUtils.try_set_cell_with_string_value(
                df, index, column_name_str, row, column_value_name)

    @staticmethod
    def try_set_cell_with_string_value(
            df, index, column_name, row, column_value_name):
        # Store str(value); empty strings become NaN.
        try:
            value = str(row[column_value_name])
            if value == "":
                df.set_value(index, column_name, np.nan)
            else:
                df.set_value(index, column_name, value)
        except BaseException:
            df.set_value(index, column_name, np.nan)

    @staticmethod
    def convert_to_pandas_dataframe_model(rdd_df, clean_up=True):
        """Inverse of convert_meas_value: collapse typed Spark columns back
        into the generic meas_value / limit columns of a pandas DataFrame.

        Per row, ``meas_value_datatype`` selects which typed column feeds the
        generic one (string column is the pre-filled default). When clean_up
        is True, ``meas_datatype`` is overwritten with the observed datatype
        (unless it is "unknown"), and non-empty string values get their
        limits and unit blanked, since limits/units only apply to numerics.
        """
        df = rdd_df \
            .toPandas()
        # Default the generic value to the string column; overridden below
        # for numeric datatypes.
        df['meas_value'] = df['meas_value_str']
        df['meas_upper_limit'] = np.nan
        df['meas_lower_limit'] = np.nan
        for index, row in df.iterrows():
            if row['meas_value_datatype'] == "long":
                df.set_value(index, 'meas_value', row['meas_value_l'])
            elif row['meas_value_datatype'] == "double":
                df.set_value(index, 'meas_value', row['meas_value_d'])
            else:
                # since we set double as default
                pass
            # Prefer the long limit when present, then the double one.
            if not np.isnan(row['meas_upper_limit_l']):
                df.set_value(
                    index,
                    'meas_upper_limit',
                    row['meas_upper_limit_l'])
            elif not np.isnan(row['meas_upper_limit_d']):
                df.set_value(
                    index,
                    'meas_upper_limit',
                    row['meas_upper_limit_d'])
            else:
                pass
            if not np.isnan(row['meas_lower_limit_l']):
                df.set_value(
                    index,
                    'meas_lower_limit',
                    row['meas_lower_limit_l'])
            elif not np.isnan(row['meas_lower_limit_d']):
                df.set_value(
                    index,
                    'meas_lower_limit',
                    row['meas_lower_limit_d'])
            else:
                pass
            if clean_up and not row['meas_value_datatype'] == "unknown":
                df.set_value(
                    index,
                    'meas_datatype',
                    row['meas_value_datatype'])
            if row['meas_value_datatype'] == "string" and row['meas_value_str'] != "":
                # Limits and units are meaningless for string measurements.
                df.set_value(index, 'meas_upper_limit', np.nan)
                df.set_value(index, 'meas_lower_limit', np.nan)
                df.set_value(index, 'meas_unit', "")
        # Drop the intermediate typed columns.
        df = df.drop('meas_value_str', 1)
        df = df.drop('meas_value_d', 1)
        df = df.drop('meas_value_l', 1)
        df = df.drop('meas_value_datatype', 1)
        df = df.drop('meas_upper_limit_d', 1)
        df = df.drop('meas_upper_limit_l', 1)
        df = df.drop('meas_lower_limit_d', 1)
        df = df.drop('meas_lower_limit_l', 1)
        # NaN -> None for downstream consumers.
        return df.where(df.notnull(), None)
| {
"content_hash": "3d278292484f28dfc99b72451cf839bd",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 90,
"avg_line_length": 39.8054054054054,
"alnum_prop": 0.512221618685497,
"repo_name": "epidataio/epidata-community",
"id": "23fe93cf0ea3f12942a580f8e68b0d3f270da3f5",
"size": "7409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipython/epidata/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7246"
},
{
"name": "C#",
"bytes": "84"
},
{
"name": "CSS",
"bytes": "456"
},
{
"name": "HTML",
"bytes": "729037"
},
{
"name": "JavaScript",
"bytes": "8195"
},
{
"name": "Jupyter Notebook",
"bytes": "682745"
},
{
"name": "Less",
"bytes": "1230"
},
{
"name": "Makefile",
"bytes": "7413"
},
{
"name": "Python",
"bytes": "1236504"
},
{
"name": "Scala",
"bytes": "1013142"
},
{
"name": "Shell",
"bytes": "8175"
}
],
"symlink_target": ""
} |
import cgi
import os
import pyauto_functional # Must be imported before pyauto
import pyauto
class InstantSettingsTest(pyauto.PyUITest):
  """Test Chrome Instant settings."""

  def testEnableDisableInstant(self):
    """Test to verify default Chrome Instant setting.
    Check if the setting can be enabled and disabled."""
    # Instant must be off by default.
    self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kInstantEnabled),
                     msg='Instant is enabled by default.')
    # Enable instant.
    self.SetPrefs(pyauto.kInstantEnabled, True)
    self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kInstantEnabled),
                    msg='Instant is not enabled.')
    # Typing in the omnibox should now produce a loaded Instant preview.
    self.SetOmniboxText('google.com')
    self.assertTrue(self.WaitUntil(
        lambda: self.GetInstantInfo().get('current') and not
                self.GetInstantInfo().get('loading')))
    title = self.GetInstantInfo()['title']
    self.assertEqual('Google', title, msg='Instant did not load.')
    # Disable Instant.
    self.SetPrefs(pyauto.kInstantEnabled, False)
    self.assertFalse(self.GetInstantInfo()['enabled'],
                     msg='Instant is not disabled.')
class InstantTest(pyauto.PyUITest):
"""TestCase for Omnibox Instant feature."""
  def setUp(self):
    pyauto.PyUITest.setUp(self)
    # Every test in this class runs with Instant enabled.
    self.SetPrefs(pyauto.kInstantEnabled, True)

  def _DoneLoading(self):
    # True once an Instant preview exists and has finished loading.
    info = self.GetInstantInfo()
    return info.get('current') and not info.get('loading')

  def _DoneLoadingGoogleQuery(self, query):
    """Wait for Omnibox Instant to load Google search result
    and verify location URL contains the specified query.

    Args:
      query: Value of query parameter.
             E.g., http://www.google.com?q=hi so query is 'hi'.
    """
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo().get('location')
    if location is not None:
      q = cgi.parse_qs(location).get('q')
      if q is not None and query in q:
        return True
    return False

  def testInstantNavigation(self):
    """Test that instant navigates based on omnibox input."""
    self.SetOmniboxText('google.com')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.com' in location,
                    msg='No google.com in %s' % location)
    self.SetOmniboxText('google.es')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.es' in location,
                    msg='No google.es in %s' % location)
    # Initiate instant search (at default google.com).
    self.SetOmniboxText('chrome instant')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.com' in location,
                    msg='No google.com in %s' % location)

  def testInstantCaseSensitivity(self):
    """Verify that Chrome Instant results case insensitive."""
    # Text in lowercase letters.
    self.SetOmniboxText('google')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    lowercase_instant_info = self.GetInstantInfo()
    # Text in uppercase letters.
    self.SetOmniboxText('GOOGLE')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    uppercase_instant_info = self.GetInstantInfo()
    # Check lowercase and uppercase text results are same.
    self.assertEquals(lowercase_instant_info, uppercase_instant_info,
                      msg='Lowercase and Uppercase instant info doesn\'t match')
    # Text in mixed case letters.
    self.SetOmniboxText('GooGle')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    mixedcase_instant_info = self.GetInstantInfo()
    # Check mixedcase and uppercase text results are same.
    self.assertEquals(mixedcase_instant_info, uppercase_instant_info,
                      msg='Mixedcase and Uppercase instant info doesn\'t match')

  def testInstantWithSearchEngineOtherThanGoogle(self):
    """Verify that Instant is inactive for search engines other than Google."""
    # Check with Yahoo!.
    self.MakeSearchEngineDefault('yahoo.com')
    self.assertFalse(self.GetInstantInfo()['active'],
                     msg='Instant is active for Yahoo!')
    # Check with Bing.
    self.MakeSearchEngineDefault('bing.com')
    self.assertFalse(self.GetInstantInfo()['active'],
                     msg='Instant is active for Bing.')

  def testInstantDisabledInIncognito(self):
    """Test that instant is disabled in Incognito mode."""
    self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
    # windex=1 targets the incognito window opened above.
    self.SetOmniboxText('google.com', windex=1)
    self.assertFalse(self.GetInstantInfo()['active'],
                     'Instant enabled in Incognito mode.')

  def testInstantOverlayNotStoredInHistory(self):
    """Test that instant overlay page is not stored in history."""
    url = self.GetFileURLForDataPath('title2.html')
    self.SetOmniboxText(url)
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    history = self.GetHistoryInfo().History()
    self.assertEqual(0, len(history))

  def testInstantDisabledForJavaScript(self):
    """Test that instant is disabled for javascript URLs."""
    self.SetOmniboxText('javascript:')
    self.assertFalse(self.GetInstantInfo()['active'],
                     'Instant enabled for javascript URL.')

  def testInstantDisablesPopupsOnPrefetch(self):
    """Test that instant disables popups when prefetching."""
    file_url = self.GetFileURLForDataPath(
        'popup_blocker', 'popup-blocked-to-post-blank.html')
    self.SetOmniboxText(file_url)
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue(file_url in location,
                    msg='Prefetched page is not %s' % file_url)
    # The prefetched page's popup must have been suppressed, not just blocked.
    blocked_popups = self.GetBlockedPopupsInfo()
    self.assertEqual(0, len(blocked_popups),
                     msg='Unexpected popup in instant preview.')
def testInstantLoadsFor100CharsLongQuery(self):
"""Test that instant loads for search query of 100 characters."""
query = '#' * 100
self.SetOmniboxText(query)
self.assertTrue(self.WaitUntil(self._DoneLoadingGoogleQuery, args=[query]))
def _BringUpInstant(self):
"""Helper function to bring up instant."""
file_path = os.path.join(os.path.abspath(self.DataDir()),
'google', 'google.html')
self.SetOmniboxText(self.GetFileURLForPath(file_path))
self.assertTrue(self.WaitUntil(self._DoneLoading))
self.assertTrue('google.html' in self.GetInstantInfo()['location'],
msg='No google.html in %s' %
self.GetInstantInfo()['location'])
def testFindInCanDismissInstant(self):
"""Test that instant preview is dismissed by find-in-page."""
self._BringUpInstant()
self.OpenFindInPage()
self.assertEqual(self.GetActiveTabTitle(), 'about:blank')
def testNTPCanDismissInstant(self):
"""Test that instant preview is dismissed by adding new tab page."""
self._BringUpInstant()
self.AppendTab(pyauto.GURL('chrome://newtab'))
self.GetBrowserWindow(0).GetTab(1).Close(True)
self.assertEqual(self.GetActiveTabTitle(), 'about:blank')
def testExtnPageCanDismissInstant(self):
"""Test that instant preview is dismissed by extension page."""
self._BringUpInstant()
self.AppendTab(pyauto.GURL('chrome://extensions'))
self.GetBrowserWindow(0).GetTab(1).Close(True)
self.assertEqual(self.GetActiveTabTitle(), 'about:blank')
def testNewWindowCanDismissInstant(self):
"""Test that instant preview is dismissed by New Window."""
self._BringUpInstant()
self.OpenNewBrowserWindow(True)
self.CloseBrowserWindow(1)
self.assertEqual(self.GetActiveTabTitle(), 'about:blank')
def _AssertInstantDoesNotDownloadFile(self, path):
"""Asserts instant does not download the specified file.
Args:
path: Path to file.
"""
self.NavigateToURL('chrome://downloads')
filepath = self.GetFileURLForDataPath(path)
self.SetOmniboxText(filepath)
self.WaitUntilOmniboxQueryDone()
self.WaitForAllDownloadsToComplete()
self.assertFalse(self.GetDownloadsInfo().Downloads(),
msg='Should not download: %s' % filepath)
def testInstantDoesNotDownloadZipFile(self):
"""Test that instant does not download zip file."""
self._AssertInstantDoesNotDownloadFile(os.path.join('zip', 'test.zip'))
def testInstantDoesNotDownloadPDFFile(self):
"""Test that instant does not download PDF file."""
self._AssertInstantDoesNotDownloadFile(os.path.join('printing',
'cloud_print_unittest.pdf'))
def _AssertInstantLoadsFile(self, path):
"""Asserts instant loads the specified file.
Args:
path: Path to file.
"""
filepath = self.GetFileURLForDataPath(path)
error = 'Failed to load: %s' % filepath
self.SetOmniboxText(filepath)
self.assertTrue(self.WaitUntil(self._DoneLoading), msg=error)
self.assertEqual(self.GetInstantInfo()['location'], filepath, msg=error)
def testInstantLoadsGIF(self):
"""Test that instant loads GIF file."""
self._AssertInstantLoadsFile(os.path.join('animate1.gif'))
def testInstantLoadsJPEG(self):
"""Test that instant loads JPEG file."""
self._AssertInstantLoadsFile(os.path.join('gpu', 'webgl_teapot',
'bump.jpg'))
def testInstantLoadsPNG(self):
"""Test that instant loads PNG file."""
self._AssertInstantLoadsFile(os.path.join('save_page', '1.png'))
def testInstantLoadsSVG(self):
"""Test that instant loads SVG file."""
self._AssertInstantLoadsFile(os.path.join('circle.svg'))
# Delegate to the pyauto test harness when run as a script.
if __name__ == '__main__':
  pyauto_functional.Main()
| {
"content_hash": "294ded4429ca4f3828041ab2c6be3492",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 40.23456790123457,
"alnum_prop": 0.6854863455047561,
"repo_name": "Crystalnix/house-of-life-chromium",
"id": "abc108874791117723825dcc74c6e31c0e4a73ae",
"size": "9963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrome/test/functional/instant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "3418"
},
{
"name": "C",
"bytes": "88445923"
},
{
"name": "C#",
"bytes": "73756"
},
{
"name": "C++",
"bytes": "77228136"
},
{
"name": "Emacs Lisp",
"bytes": "6648"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "6191433"
},
{
"name": "Objective-C",
"bytes": "4023654"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "92217"
},
{
"name": "Python",
"bytes": "5604932"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "1234672"
},
{
"name": "Tcl",
"bytes": "200213"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from datetime import datetime
from scopus.classes import Retrieval
class CitationOverview(Retrieval):
    @property
    def authors(self):
        """A list of namedtuples storing author information,
        where each namedtuple corresponds to one author.
        The information in each namedtuple is (name surname initials id url).
        All entries are strings.
        """
        out = []
        order = 'name surname initials id url'
        auth = namedtuple('Author', order)
        # Some documents carry no 'author' entry at all; iterate an empty
        # list then so the property degrades to None instead of raising.
        for author in self._citeInfoMatrix.get('author') or []:
            # Reuse the module-level key normalizer instead of duplicating it.
            author = _parse_dict(author)
            new = auth(name=author.get('index-name'), id=author.get('authid'),
                       surname=author.get('surname'),
                       initials=author.get('initials'),
                       url=author.get('author-url'))
            out.append(new)
        return out or None

    @property
    def cc(self):
        """List of tuples of yearly number of citations
        for specified years."""
        _years = range(self._start, self._end+1)
        try:
            return list(zip(_years, [d.get('$') for d in self._citeInfoMatrix['cc']]))
        except AttributeError:  # No citations
            return list(zip(_years, [0]*len(_years)))

    @property
    def citationType_long(self):
        """Type (long version) of the abstract (e.g. article, review)."""
        return self._citeInfoMatrix.get('citationType', {}).get('$')

    @property
    def citationType_short(self):
        """Type (short version) of the abstract (e.g. ar, re)."""
        return self._citeInfoMatrix.get('citationType', {}).get('@code')

    @property
    def doi(self):
        """Document Object Identifier (DOI) of the abstract."""
        return self._identifierlegend.get('doi')

    @property
    def endingPage(self):
        """Ending page."""
        return self._citeInfoMatrix.get('endingPage')

    @property
    def h_index(self):
        """h-index of citations of the abstract (according to Scopus)."""
        return self._data['h-index']

    @property
    def issn(self):
        """ISSN of the publisher.
        Note: If E-ISSN is known to Scopus, this returns both
        ISSN and E-ISSN in random order separated by blank space.
        """
        return self._citeInfoMatrix.get('issn')

    @property
    def issueIdentifier(self):
        """Issue number for abstract."""
        return self._citeInfoMatrix.get('issueIdentifier')

    @property
    def lcc(self):
        """Number of citations the abstract received
        after the specified end year.
        """
        return self._citeInfoMatrix.get('lcc')

    @property
    def pcc(self):
        """Number of citations the abstract received
        before the specified start year.
        """
        return self._citeInfoMatrix.get('pcc')

    @property
    def pii(self):
        """The Publication Item Identifier (PII) of the abstract."""
        return self._identifierlegend.get('pii')

    @property
    def publicationName(self):
        """Name of source the abstract is published in (e.g. the Journal)."""
        return self._citeInfoMatrix.get('publicationName')

    @property
    def scopus_id(self):
        """The Scopus ID of the abstract. It is the second part of an EID.
        The Scopus ID might differ from the one provided.
        """
        return self._identifierlegend.get('scopus_id')

    @property
    def startingPage(self):
        """Starting page."""
        return self._citeInfoMatrix.get('startingPage')

    @property
    def rangeCount(self):
        """Number of citations for specified years."""
        return self._citeInfoMatrix.get('rangeCount')

    @property
    def rowTotal(self):
        """Number of citations (specified and omitted years)."""
        return self._citeInfoMatrix.get('rowTotal')

    @property
    def title(self):
        """Abstract title."""
        return self._citeInfoMatrix.get('title')

    @property
    def url(self):
        """URL to Citation Overview API view of the abstract."""
        return self._citeInfoMatrix.get('url')

    @property
    def volume(self):
        """Volume for the abstract."""
        return self._citeInfoMatrix.get('volume')

    def __init__(self, eid, start, end=None, refresh=False):
        """Class to represent the results from a Scopus Citation Overview.
        See https://api.elsevier.com/documentation/guides/AbstractCitationViews.htm.

        Parameters
        ----------
        eid : str
            The EID of the abstract.

        start : str or int
            The first year for which the citation count should be loaded

        end : str or int (optional, default=None)
            The last year for which the citation count should be loaded.
            Defaults to the current year, evaluated at call time.

        refresh : bool (optional, default=False)
            Whether to refresh the cached file if it exists or not.

        Notes
        -----
        The files are cached in ~/.scopus/citation_overview/STANDARD/{eid}.
        Your API Key needs to be approved by Elsevier to access this API.
        """
        # Variables
        self._start = int(start)
        # Bug fix: the previous default 'end=datetime.now().year' was
        # evaluated once at import time, so a long-running process would
        # silently keep using a stale year.  Resolve it per call instead.
        self._end = int(end) if end is not None else datetime.now().year
        view = "STANDARD"  # In case Scopus adds different views in future

        # Get file content
        date = '{}-{}'.format(self._start, self._end)
        Retrieval.__init__(self, eid, 'CitationOverview', refresh, view=view,
                           date=date)
        self._data = self._json['abstract-citations-response']

        # citeInfoMatrix
        m = self._data['citeInfoMatrix']['citeInfoMatrixXML']['citationMatrix']['citeInfo'][0]
        self._citeInfoMatrix = _parse_dict(m)
        # identifier-legend
        l = self._data['identifier-legend']['identifier'][0]
        self._identifierlegend = _parse_dict(l)
        # citeColumnTotalXML
        self._citeColumnTotalXML = self._data['citeColumnTotalXML']  # not used

    def __str__(self):
        """Return a summary string."""
        # NOTE(review): assumes self.authors is not None; a document without
        # authors would raise TypeError here -- pre-existing behavior.
        authors = [a.name for a in self.authors]
        if len(authors) > 1:
            authors[-1] = " and ".join([authors[-2], authors[-1]])
        s = "Document '{self.title}' by {authors} published in "\
            "'{self.publicationName}' has the following citation trajectory "\
            "for years {self._start} to {self._end}:\n"\
            "{self.cc}\n"\
            "Additionally cited {self.pcc} times before {self._start}, and "\
            "{self.lcc} times after {self._end}".format(
                self=self, authors=", ".join(authors))
        return s
def _parse_dict(dct):
"""Auxiliary function to change the keys of a dictionary."""
return {k.split(":", 1)[-1]: v for k, v in dct.items()}
| {
"content_hash": "12c6f57bb5dd4496acb1d1dbed409ae3",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 94,
"avg_line_length": 34.63959390862944,
"alnum_prop": 0.5943728018757327,
"repo_name": "scopus-api/scopus",
"id": "7cfb19dc03b943ee876da79e64ee6355ef17eec7",
"size": "6824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scopus/abstract_citations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133243"
}
],
"symlink_target": ""
} |
import sys, os
import pygtk, gtk, gobject
import pygst
pygst.require("0.10")
import gst
class CamDesk(gtk.Window):
def closeme(self, widget, event) :
if event.keyval == gtk.keysyms.Escape :
gtk.main_quit()
def startme(self, widget, event) :
if event.keyval == gtk.keysyms.F1 :
self.player.set_state(gst.STATE_PLAYING)
def stopme(self, widget, event) :
if event.keyval == gtk.keysyms.F2 :
self.player.set_state(gst.STATE_NULL)
def properties(self, widget, event) :
if event.keyval == gtk.keysyms.F5 :
self.win = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.win.set_title("Properties")
self.win.set_size_request(320, 120)
self.win.set_resizable(False)
self.win.set_keep_above(True)
self.win.set_property('skip-taskbar-hint', True)
self.win.connect("destroy", self.closeproperties)
vbox = gtk.VBox(spacing=4)
hbox = gtk.HBox(spacing=4)
hbox2 = gtk.HBox(spacing=4)
check = gtk.CheckButton("Pin")
check.set_active(True)
check.set_size_request(100, 35)
check.connect("clicked", self.pinning)
hbox.pack_start(check)
scale = gtk.HScale()
scale.set_range(0, 100)
scale.set_value(100)
scale.set_size_request(320, 35)
scale.connect("value-changed", self.opac_slider)
hbox.pack_start(scale)
self.entry = gtk.Entry()
self.entry2 = gtk.Entry()
self.entry.set_text("width")
self.entry2.set_text("height")
hbox2.pack_start(self.entry)
hbox2.pack_start(self.entry2)
hbox3 = gtk.HBox(spacing=4)
ok = gtk.Button("OK")
ok.connect("clicked", self.change_size)
hbox3.pack_start(ok)
exit = gtk.Button("Exit")
exit.connect("clicked", self.closeproperties)
hbox3.pack_start(exit)
vbox.pack_start(hbox)
vbox.pack_start(hbox2)
vbox.pack_start(hbox3)
self.win.add(vbox)
self.win.show_all()
def pinning(self, checkbox):
if checkbox.get_active():
self.set_keep_above(True)
else:
self.set_keep_above(False)
def opac_slider(self, w):
self.set_opacity(w.get_value()/100.0)
def change_size(self, w):
width = int(self.entry.get_text())
height = int(self.entry2.get_text())
self.set_size_request(width,height)
def closeproperties(self, w):
self.win.hide()
def __init__(self):
super(CamDesk, self).__init__(gtk.WINDOW_TOPLEVEL)
self.set_position(gtk.WIN_POS_CENTER)
# display at bottom right
self.set_gravity(gtk.gdk.GRAVITY_SOUTH_EAST)
self.move(gtk.gdk.screen_width(), gtk.gdk.screen_height())
self.stick()
self.set_title("CamDesk")
self.set_decorated(False)
self.set_has_frame(False)
self.set_size_request(320, 240)
self.set_resizable(False)
self.set_keep_above(True)
self.set_property('skip-taskbar-hint', True)
gtk.window_set_default_icon_from_file('logo.png')
self.connect("destroy", gtk.main_quit, "WM destroy")
self.connect("key-press-event", self.closeme)
self.connect("key-press-event", self.startme)
self.connect("key-press-event", self.stopme)
self.connect("key-press-event", self.properties)
self.movie_window = gtk.DrawingArea()
self.movie_window.set_double_buffered(False)
self.add(self.movie_window)
self.show_all()
# Set up the gstreamer pipeline
self.player = gst.parse_launch ("v4l2src ! videoflip method=horizontal-flip ! autovideosink")
bus = self.player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect("message", self.on_message)
bus.connect("sync-message::element", self.on_sync_message)
self.player.set_state(gst.STATE_PLAYING)
def on_message(self, bus, message):
t = message.type
if t == gst.MESSAGE_EOS:
self.player.set_state(gst.STATE_NULL)
self.startcam.set_label("Start")
elif t == gst.MESSAGE_ERROR:
err, debug = message.parse_error()
print "Error: %s" % err, debug
self.player.set_state(gst.STATE_NULL)
self.startcam.set_label("Start")
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
if message_name == "prepare-xwindow-id":
# Assign the viewport
imagesink = message.src
imagesink.set_property("force-aspect-ratio", True)
imagesink.set_xwindow_id(self.movie_window.window.xid)
# Build the window, initialise GDK threading and enter the GTK main loop.
CamDesk()
gtk.gdk.threads_init()
gtk.main()
| {
"content_hash": "ae5b1f54ad0b2da099998b065696ca4d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 94,
"avg_line_length": 27.611842105263158,
"alnum_prop": 0.6978794376935906,
"repo_name": "arteymix/CamDesk",
"id": "31856e8ed69122887da16ba5ac304a7300d906e5",
"size": "4220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camdesk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4220"
}
],
"symlink_target": ""
} |
import argparse
import sys
import os
import os.path
import errno
import xml.etree.ElementTree as ET
from pkg_resources import resource_stream
# XML file storing one or more paths to the XML file containing the API keys
locations_filename = 'locations.xml'
# Load the bundled locations XML once at import time (packaged resource).
locations_file = resource_stream('slcli.resources', locations_filename)
locations_xml = locations_file.read().decode('utf8')
class KeysNotFoundError(IOError):
    """Raised when no API key file exists at any of the configured paths."""

    def __init__(self, paths):
        lines = ['Could not find the API keys in any of these locations:']
        lines.extend(paths)
        self.message = '\n'.join(lines)
        self.paths = paths
        super(KeysNotFoundError, self).__init__(self.message)
def get_keys():
    """Return API keys, preferring environment variables over key files.

    The PLATSUPPSLAG and RESEPLANERARE environment variables take
    precedence; only when neither is set are the configured XML key
    files consulted via find_keys().
    """
    keys = {
        "platsuppslag": os.environ.get("PLATSUPPSLAG"),
        "reseplanerare3.1": os.environ.get("RESEPLANERARE"),
    }
    if any(v is not None for v in keys.values()):
        return keys
    return find_keys()
def find_keys():
    """Read API keys from the first existing XML file listed in the
    bundled locations XML, skipping entries for other operating systems.

    Raises KeysNotFoundError when none of the listed files exists.
    """
    root = ET.fromstring(locations_xml)
    # Keep <path> entries that either have no 'os' attribute or match ours.
    candidates = [child.text for child in root
                  if child.tag == 'path'
                  and ('os' not in child.attrib
                       or child.attrib['os'] == os.name)]
    resolvpaths = [os.path.expanduser(os.path.expandvars(p))
                   for p in candidates]
    for rpath in resolvpaths:
        fullpath = os.path.join(os.path.dirname(locations_filename), rpath)
        try:
            return read_keys(fullpath)
        except OSError as e:
            # A missing file just means "try the next location".
            if e.errno != errno.ENOENT:
                raise
    raise KeysNotFoundError(resolvpaths)
def read_keys(path):
    """Read API keys from the XML file at *path* as a name->key dict,
    aborting the process if the configuration uses a retired API."""
    root = ET.parse(path).getroot()
    keys = {}
    for key in root.iter('key'):
        keys[key.attrib['name']] = key.text
    version_check_keys(keys, path)
    return keys
def version_check_keys(keys, path):
    """Exit with status 1 (after printing migration advice to stderr)
    if *keys* still contains a retired reseplanerare API entry."""
    current = 'reseplanerare3.1'
    for api in ('reseplanerare2', 'reseplanerare3'):
        if api not in keys:
            continue
        msg = "\n".join([
            "API:t '{}' har ersatts av {} och kommer inte att stödjas längre.".format(api, current),
            "",
            "Var vänlig beställ en ny API-nyckel för {} på trafiklab.se och uppdatera följande rad i din {} från:".format(current, path),
            "",
            '- <key name="reseplanerare3">min_gamla_nyckel</key>',
            "",
            "till:",
            "",
            '+ <key name="reseplanerare3.1">min_nya_nyckel</key>',
            "",
            "Avbryter..."
        ])
        print(msg, file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
    # CLI helper: print the key for the API named on the command line.
    keys = get_keys()
    parser = argparse.ArgumentParser()
    parser.add_argument('api', choices=list(keys))
    args = parser.parse_args()
    print(keys[args.api])
| {
"content_hash": "ac5da3cfc5bc60c2a4079d02c6e2646f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 141,
"avg_line_length": 33.51111111111111,
"alnum_prop": 0.5944960212201591,
"repo_name": "Sebelino/SL-CLI",
"id": "2bcf6302e6bc0e22884b3c49acdcffd675ce1f08",
"size": "3071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slcli/keyreader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "918"
},
{
"name": "Python",
"bytes": "33964"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import with_statement
import logging
import sys
from collections import defaultdict
from kombu.tests.utils import redirect_stdouts
from mock import patch
from celery import beat
from celery import platforms
from celery.app import app_or_default
from celery.bin import celerybeat as celerybeat_bin
from celery.apps import beat as beatapp
from celery.tests.utils import AppCase
class MockedShelveModule(object):
    """Stand-in for the ``shelve`` module backed by in-memory dicts."""

    # One shared dict per filename, created lazily on first open().
    shelves = defaultdict(dict)

    def open(self, filename, *args, **kwargs):
        # The same filename always yields the same (shared) dict.
        return self.shelves[filename]


mocked_shelve = MockedShelveModule()
class MockService(beat.Service):
    """beat.Service whose start/sync only record that they were called."""

    started = False
    in_sync = False
    persistence = mocked_shelve

    def start(self):
        # Record on the class so tests can assert without an instance.
        self.__class__.started = True

    def sync(self):
        self.__class__.in_sync = True
class MockBeat(beatapp.Beat):
    """Beat whose run() only flips a class flag instead of starting."""

    running = False

    def run(self):
        MockBeat.running = True
class MockBeat2(beatapp.Beat):
    """Beat using MockService and skipping sync-handler installation."""

    Service = MockService

    def install_sync_handler(self, b):
        # No-op: tests drive the mock service directly.
        pass
class MockBeat3(beatapp.Beat):
    """Beat whose sync-handler installation always fails (error-path tests)."""

    Service = MockService

    def install_sync_handler(self, b):
        raise TypeError('xxx')
class test_Beat(AppCase):
    """Unit tests for celery.apps.beat.Beat."""

    def test_loglevel_string(self):
        # String level names must map to the numeric logging levels.
        b = beatapp.Beat(loglevel='DEBUG')
        self.assertEqual(b.loglevel, logging.DEBUG)

        b2 = beatapp.Beat(loglevel=logging.DEBUG)
        self.assertEqual(b2.loglevel, logging.DEBUG)

    def test_init_loader(self):
        # Smoke test: init_loader() must not raise.
        b = beatapp.Beat()
        b.init_loader()

    def test_process_title(self):
        # Smoke test: set_process_title() must not raise.
        b = beatapp.Beat()
        b.set_process_title()

    def test_run(self):
        # run() must start the configured Service.
        b = MockBeat2()
        MockService.started = False
        b.run()
        self.assertTrue(MockService.started)

    def psig(self, fun, *args, **kwargs):
        """Run *fun* with platforms.signals patched; return the handlers
        it installed, keyed by signal name."""
        handlers = {}

        class Signals(platforms.Signals):

            def __setitem__(self, sig, handler):
                handlers[sig] = handler

        # Swap in the recording Signals object and always restore it.
        p, platforms.signals = platforms.signals, Signals()
        try:
            fun(*args, **kwargs)
            return handlers
        finally:
            platforms.signals = p

    def test_install_sync_handler(self):
        # The installed SIGINT handler must sync the service, then exit.
        b = beatapp.Beat()
        clock = MockService()
        MockService.in_sync = False
        handlers = self.psig(b.install_sync_handler, clock)
        with self.assertRaises(SystemExit):
            handlers['SIGINT']('SIGINT', object())
        self.assertTrue(MockService.in_sync)
        MockService.in_sync = False

    def test_setup_logging(self):
        # Without redirection, stdout must not grow a 'logger' attribute.
        try:
            # py3k
            delattr(sys.stdout, 'logger')
        except AttributeError:
            pass
        b = beatapp.Beat()
        b.redirect_stdouts = False
        b.app.log.__class__._setup = False
        b.setup_logging()
        with self.assertRaises(AttributeError):
            sys.stdout.logger

    @redirect_stdouts
    @patch('celery.apps.beat.logger')
    def test_logs_errors(self, logger, stdout, stderr):
        # A failing sync-handler install must be logged as critical.
        b = MockBeat3(socket_timeout=None)
        b.start_scheduler()
        self.assertTrue(logger.critical.called)

    @redirect_stdouts
    @patch('celery.platforms.create_pidlock')
    def test_use_pidfile(self, create_pidlock, stdout, stderr):
        # Supplying a pidfile must acquire a pid lock before starting.
        b = MockBeat2(pidfile='pidfilelockfilepid', socket_timeout=None)
        b.start_scheduler()
        self.assertTrue(create_pidlock.called)
class MockDaemonContext(object):
    """Context manager that records open/close instead of daemonizing."""

    opened = False
    closed = False

    def __init__(self, *args, **kwargs):
        # Accept (and ignore) whatever the real daemon context takes.
        pass

    def open(self):
        type(self).opened = True
        return self

    __enter__ = open

    def close(self, *args):
        type(self).closed = True

    __exit__ = close
class test_div(AppCase):
    """Tests for the celerybeat console entry points (main / detach)."""

    def setup(self):
        # Swap in mocks so no real beat service or daemon context starts.
        self.prev, beatapp.Beat = beatapp.Beat, MockBeat
        self.ctx, celerybeat_bin.detached = (
            celerybeat_bin.detached, MockDaemonContext)

    def teardown(self):
        beatapp.Beat = self.prev
        # Bug fix: setup() saved the original 'detached' in self.ctx but it
        # was never restored, leaking MockDaemonContext into later tests.
        celerybeat_bin.detached = self.ctx

    def test_main(self):
        sys.argv = [sys.argv[0], '-s', 'foo']
        try:
            celerybeat_bin.main()
            self.assertTrue(MockBeat.running)
        finally:
            MockBeat.running = False

    def test_detach(self):
        # Detached run must open and close the daemon context.
        cmd = celerybeat_bin.BeatCommand()
        cmd.app = app_or_default()
        cmd.run(detach=True)
        self.assertTrue(MockDaemonContext.opened)
        self.assertTrue(MockDaemonContext.closed)

    def test_parse_options(self):
        # -s must end up in options.schedule.
        cmd = celerybeat_bin.BeatCommand()
        cmd.app = app_or_default()
        options, args = cmd.parse_options('celerybeat', ['-s', 'foo'])
        self.assertEqual(options.schedule, 'foo')
| {
"content_hash": "ce2208418ced1da3cbff2c5f7240f7c9",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 72,
"avg_line_length": 25.527173913043477,
"alnum_prop": 0.6203959974451778,
"repo_name": "mozilla/firefox-flicks",
"id": "5fe35d3e89f89dc7784b0381848f64fd77d9a952",
"size": "4697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/celery/tests/bin/test_celerybeat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68358"
},
{
"name": "HTML",
"bytes": "337116"
},
{
"name": "JavaScript",
"bytes": "44816"
},
{
"name": "Puppet",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "4166155"
},
{
"name": "Shell",
"bytes": "2409"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import sys
import tempfile
import unittest
class TestPlannerReLaunch(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_relaunch_with_planner(self):
from test_auto_parallel_relaunch import cluster_json, mapping_josn
cluster_json_path = os.path.join(
self.temp_dir.name, "auto_parallel_cluster.json"
)
mapping_json_path = os.path.join(
self.temp_dir.name, "auto_parallel_rank_mapping.json"
)
cluster_json_object = json.loads(cluster_json)
with open(cluster_json_path, "w") as cluster_json_file:
json.dump(cluster_json_object, cluster_json_file)
mapping_json_object = json.loads(mapping_josn)
with open(mapping_json_path, "w") as mapping_json_file:
json.dump(mapping_json_object, mapping_json_file)
file_dir = os.path.dirname(os.path.abspath(__file__))
launch_model_path = os.path.join(
file_dir, "auto_parallel_relaunch_with_planner.py"
)
if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
else:
coverage_args = []
cmd = (
[sys.executable, "-u"]
+ coverage_args
+ [
"-m",
"paddle.distributed.launch",
"--log_dir",
self.temp_dir.name,
"--cluster_topo_path",
cluster_json_path,
"--rank_mapping_path",
mapping_json_path,
"--enable_auto_mapping",
"True",
launch_model_path,
]
)
process = subprocess.Popen(cmd)
process.wait()
self.assertEqual(process.returncode, 0)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "fdb846569a7f59515f92bff03a0ecb0b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 29.73134328358209,
"alnum_prop": 0.5451807228915663,
"repo_name": "PaddlePaddle/Paddle",
"id": "16b6016458c38d2ee8ef96c774987618aeb27381",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_planner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(0, '../')
import ply.lex as lex
import ply.yacc as yacc
from pymag_trees.buchheim import buchheim_layout
from pymag_trees.gen import Tree
import ctokens
import cgrammar
def lexer(text):
    """Tokenize C source text and return the token values in order.

    Args:
        text: C source code as a single string.

    Returns:
        list: the .value of every token produced by the ctokens lexer.
    """
    clexer = lex.lex(module=ctokens)
    clexer.input(text)
    result = []
    tok = clexer.token()
    # token() returns None at end of input; compare by identity (PEP 8),
    # not with '!=' as the original did.
    while tok is not None:
        result.append(tok.value)
        tok = clexer.token()
    return result
# Global node counter used to hand out sequential ids during flattening.
count = 0


def drawtree(text):
    """Parse C source *text*, lay it out with Buchheim's algorithm, and
    return the flattened (plain-dict) layout tree."""
    global count
    count = 0  # restart ids for each invocation
    text = '\n'.join(text.splitlines())
    cparser = yacc.yacc(module=cgrammar, write_tables=0, debug=0)
    clexer = lex.lex(module=ctokens)
    parse_tree = cparser.parse(text, lexer=clexer, tracking=False, debug=False)
    return flattenTree(buchheim_layout(parse_tree))


def flattenTree(dt):
    """Recursively convert a layout node into a nested dict, assigning
    preorder ids from the module-level counter."""
    global count
    count += 1
    node_id = str(count)
    return {
        'id': node_id,
        'node': dt.tree.node,
        'x': dt.x,
        'y': dt.y,
        'children': [flattenTree(child) for child in dt.children],
    }
#DEBUG & testing =')
# text = ''' #include<stdio.h>
# int fact(int n) {
# if (n < 2) {
# return 1;
# } else {
# return fact(n - 1) * n;
# }
# }
# fact(4); '''
# cparser = yacc.yacc(module=cgrammar, write_tables=0, debug=0)
# clexer = lex.lex(module=ctokens)
# parse_tree = cparser.parse(text, lexer=clexer, tracking=False, debug=False)
# print parse_tree.children[0].node | {
"content_hash": "04ef0ff733684946b062a371cc2f4edf",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 22.229508196721312,
"alnum_prop": 0.6541297935103245,
"repo_name": "lusv/irunaround",
"id": "8ee4c551a440354ab2006558729dba8a188b3ca5",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irunaround/frontend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "6783335"
},
{
"name": "JavaScript",
"bytes": "4511365"
},
{
"name": "Python",
"bytes": "186247"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ResourceQuotaStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types: attribute name -> attribute type.
    # attribute_map: attribute name -> JSON key in the API definition.
    swagger_types = {
        'hard': 'dict(str, str)',
        'used': 'dict(str, str)'
    }

    attribute_map = {
        'hard': 'hard',
        'used': 'used'
    }

    def __init__(self, hard=None, used=None):
        """
        V1ResourceQuotaStatus - a model defined in Swagger
        """
        self._hard = None
        self._used = None
        self.discriminator = None

        if hard is not None:
            self.hard = hard
        if used is not None:
            self.used = used

    @property
    def hard(self):
        """
        Gets the hard of this V1ResourceQuotaStatus.
        Hard is the set of enforced hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md

        :return: The hard of this V1ResourceQuotaStatus.
        :rtype: dict(str, str)
        """
        return self._hard

    @hard.setter
    def hard(self, hard):
        """
        Sets the hard of this V1ResourceQuotaStatus.

        :param hard: The hard of this V1ResourceQuotaStatus.
        :type: dict(str, str)
        """
        self._hard = hard

    @property
    def used(self):
        """
        Gets the used of this V1ResourceQuotaStatus.
        Used is the current observed total usage of the resource in the namespace.

        :return: The used of this V1ResourceQuotaStatus.
        :rtype: dict(str, str)
        """
        return self._used

    @used.setter
    def used(self, used):
        """
        Sets the used of this V1ResourceQuotaStatus.

        :param used: The used of this V1ResourceQuotaStatus.
        :type: dict(str, str)
        """
        self._used = used

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into nested models held in lists.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into nested models held as dict values.
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1ResourceQuotaStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "32277706c1181e11c8115535b26e2a94",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 178,
"avg_line_length": 27.394736842105264,
"alnum_prop": 0.55283381364073,
"repo_name": "mbohlool/client-python",
"id": "3f625d9dac4db503515cb58d550a3c6a1edf0121",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_resource_quota_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
"""
#: 205
Title: Isomorphic Strings
Description:
------
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while
preserving the order of characters. No two characters may map to the same
character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length.
------
Time: O(n)
Space: O(n)
Difficulty: Easy
"""
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True if s and t are isomorphic.

        Two strings are isomorphic when a one-to-one character mapping
        from s to t preserves order: every occurrence of a character in s
        maps to the same character in t, and no two characters of s map
        to the same character of t.

        :type s: str
        :type t: str
        :rtype: bool

        Time: O(n), Space: O(n). Both strings are assumed equal length.
        """
        s2t = {}      # established mapping: char of s -> char of t
        used = set()  # characters of t already claimed by some char of s
        # zip/range (unlike the original xrange) work on Python 2 and 3.
        for cs, ct in zip(s, t):
            if cs in s2t:
                # cs already mapped: it must keep mapping to the same ct.
                if s2t[cs] != ct:
                    return False
            elif ct in used:
                # ct is already the image of a different character.
                return False
            else:
                s2t[cs] = ct
                used.add(ct)
        return True
if __name__ == '__main__':
    # Smoke test: "egg"/"add" are isomorphic, so this prints True.
    sol = Solution()
    s = 'egg'
    t = 'add'
    # print() call form (single argument) behaves identically on
    # Python 2 and Python 3; the bare print statement is Py2-only.
    print(sol.isIsomorphic(s, t))
| {
"content_hash": "2a267b9b435a9e2796d4453dba2fb53a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 90,
"avg_line_length": 24.425925925925927,
"alnum_prop": 0.5655799848369977,
"repo_name": "RobinCPC/algorithm-practice",
"id": "9e4ee3a208873385ebd0efcda33b1f8f6724ac5b",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HashTable/isIsomorphic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "72337"
},
{
"name": "Jupyter Notebook",
"bytes": "25145"
},
{
"name": "Makefile",
"bytes": "2344"
},
{
"name": "Python",
"bytes": "114497"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class YaxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Subplot-id validator for the ``box.yaxis`` property."""

    def __init__(self, plotly_name="yaxis", parent_name="box", **kwargs):
        # Pop the defaults out of kwargs first so explicit caller values
        # take precedence over these fallbacks.
        dflt = kwargs.pop("dflt", "y")
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        role = kwargs.pop("role", "info")
        super(YaxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=dflt,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "371f0075f3b802b27ad39e3480cb4db5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 37.76923076923077,
"alnum_prop": 0.5967413441955194,
"repo_name": "plotly/python-api",
"id": "4da8cac1afbbf9e36450e78701396556e1df32b2",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/box/_yaxis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
celery.loaders.default
~~~~~~~~~~~~~~~~~~~~~~
The default loader used when no custom app has been initialized.
"""
from __future__ import absolute_import
import os
import warnings
from celery.datastructures import DictAttribute
from celery.exceptions import NotConfigured
from celery.utils import strtobool
from .base import BaseLoader
DEFAULT_CONFIG_MODULE = 'celeryconfig'
#: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set.
C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False))
class Loader(BaseLoader):
    """The loader used by the default app."""

    def setup_settings(self, settingsdict):
        # Wrap the plain dict so settings are reachable as attributes.
        return DictAttribute(settingsdict)

    def read_configuration(self):
        """Read configuration from :file:`celeryconfig.py` (or the module
        named by :envvar:`CELERY_CONFIG_MODULE`), falling back to an empty
        configuration when the module cannot be imported."""
        module_name = os.environ.get('CELERY_CONFIG_MODULE',
                                     DEFAULT_CONFIG_MODULE)
        try:
            user_settings = self._import_config_module(module_name)
        except ImportError:
            # billiard sets this if forked using execv
            forked = os.environ.get('FORKED_BY_MULTIPROCESSING')
            if C_WNOCONF and not forked:
                warnings.warn(NotConfigured(
                    'No %r module found! Please make sure it exists and '
                    'is available to Python.' % (module_name, )))
            return self.setup_settings({})
        self.configured = True
        return self.setup_settings(user_settings)
| {
"content_hash": "7a748c8a6d362f6c6d9f202e46e22df6",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 32.829787234042556,
"alnum_prop": 0.6409591704471809,
"repo_name": "mozilla/firefox-flicks",
"id": "f695538cba3eece9e2e5e3224596b873b852868f",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/celery/loaders/default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68358"
},
{
"name": "HTML",
"bytes": "337116"
},
{
"name": "JavaScript",
"bytes": "44816"
},
{
"name": "Puppet",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "4166155"
},
{
"name": "Shell",
"bytes": "2409"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime
from itertools import izip_longest
import html5lib
from lxml import etree
from cadorsfeed.cadorslib.xpath_functions import extensions
from cadorsfeed.cadorslib.narrative import process_narrative
from cadorsfeed.cadorslib.locations import LocationStore
from cadorsfeed.aerodb import aerodromes_re, lookup
NSMAP = {'h': 'http://www.w3.org/1999/xhtml',
'pyf': 'urn:uuid:fb23f64b-3c54-4009-b64d-cc411bd446dd',
'a': 'http://www.w3.org/2005/Atom',
'geo': 'http://www.w3.org/2003/01/geo/wgs84_pos#'}
def grouper(n, iterable):
    """Collect *iterable* into XHTML ``<p>`` groups of *n* elements each
    (the final group is padded with None by izip_longest)."""
    # Re-using the same iterator n times yields consecutive n-tuples.
    chunks = izip_longest(*([iter(iterable)] * n), fillvalue=None)
    return [group(chunk) for chunk in chunks]
def group(elements):
    """Wrap *elements* in a fresh XHTML ``<p>`` element and return it."""
    paragraph = etree.Element(
        "{http://www.w3.org/1999/xhtml}p",
        nsmap={'h': 'http://www.w3.org/1999/xhtml'})
    paragraph.extend(elements)
    return paragraph
def extractor(node, fields):
    """Run each field's XPath query against *node* and translate the raw
    result; text-valued fields that come back empty become None."""
    out = {}
    for name, (query, translator) in fields.iteritems():
        raw = node.xpath(query, namespaces=NSMAP,
                         extensions=extensions)
        value = translator(raw)
        # Normalise empty strings to None, but only for plain text fields.
        if translator is str or translator is unicode:
            if len(value) == 0:
                value = None
        out[name] = value
    return out
def q(query):
    """Build the XPath that pulls the <td><strong> text following the
    <th> whose text equals *query*, stripping non-breaking spaces."""
    template = ("pyf:strip_nbsp(.//h:th[text()='{0}']/"
                "following-sibling::h:td/h:strong/text())")
    return template.format(query)
def safe_int(value):
    """Coerce *value* to int, returning None instead of raising.

    ValueError covers non-numeric text such as '' or 'abc'; TypeError
    covers None and other non-coercible objects, which the original
    version let propagate even though the extractor can produce None.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        return None
def fix_name(name):
    """Turn a '"Last, First"' name into '"First Last"'."""
    surname, given = name.split(", ")
    return u"{0} {1}".format(given, surname)
def narrative_date(date_string):
    """Parse an ISO ``YYYY-MM-DD`` date string into a datetime."""
    iso_format = "%Y-%m-%d"
    return datetime.strptime(date_string, iso_format)
def unicode_list(items):
    """Coerce every element of *items* to unicode text, returning a list."""
    return [unicode(element) for element in items]
def parse_daily_report(report_file):
    """Parse a CADORS daily-report HTML file.

    Returns a dict with the report's header date, a UTC parse timestamp,
    and the list of individual occurrence reports parsed from each
    page-break <div>.
    """
    parser = html5lib.HTMLParser(
        tree=html5lib.treebuilders.getTreeBuilder("lxml"))
    document = parser.parse(report_file, encoding="utf-8")

    report_nodes = document.xpath("//h:div[@class = 'pagebreak']",
                                  namespaces=NSMAP)
    parsed_reports = [parse_report(node) for node in report_nodes]

    # The header div carries the "CADORS National Report dated ..." text;
    # pull the ISO date out of it.
    header_text = document.xpath(
        '//h:div[@class = "widthFull" and ' \
        'contains(text(), "CADORS National Report dated")]',
        namespaces=NSMAP)[0].text
    header_date = re.search("\d\d\d\d-\d\d-\d\d", header_text).group()

    return {'date': datetime.strptime(header_date, "%Y-%m-%d"),
            'parse_timestamp': datetime.utcnow(),
            'reports': parsed_reports}
def parse_report(report):
    """Extract one CADORS occurrence report from its XHTML <div>.

    Returns a dict of scalar header fields plus 'narrative' and
    'aircraft' lists, each entry itself a dict produced by extractor().
    """
    # Scalar header fields: name -> (XPath query, translator callable).
    fields = {'cadors_number': (q('Cadors Number:'), str),
              'region': (q('Reporting Region:'), unicode),
              'occurrence_type': (q('Occurrence Type:'), unicode),
              'date': (q('Occurrence Date:'), str),
              'time': (q('Occurrence Time:'), str),
              'day_night': (q('Day Or Night:'), unicode),
              'fatalities': (q('Fatalities:'), safe_int),
              'injuries': (q('Injuries:'), safe_int),
              'tclid': (q('Canadian Aerodrome ID:'), str),
              'aerodrome_name': (q('Aerodrome Name:'), unicode),
              'location': (q('Occurrence Location:'), unicode),
              'province': (q('Province:'), unicode),
              'country': (q('Country:'), unicode),
              'world_area': (q('World Area:'), unicode),
              'reported_by': (q('Reported By:'), unicode),
              'nav_canada_aor': (q('AOR Number:'), unicode),
              'tsb_class': (q('TSB Class Of Investigation:'), safe_int),
              'tsb_number': (q('TSB Occurrence No:'), unicode)}
    # Event categories live in their own fieldset as <strong> elements.
    fields['categories'] = (
        ".//h:fieldset/h:legend/h:strong[contains(text()," \
        "'Event Information')]/../following-sibling::h:table//" \
        "h:strong/text()", unicode_list)
    report_data = extractor(report, fields)
    report_data['narrative'] = []
    report_data['aircraft'] = []
    # Narrative entries are laid out as consecutive tables, 5 per entry
    # (hence grouper(5, ...) below).
    narrative_parts = report.xpath(
        ".//h:fieldset/h:legend/h:strong[contains(text()," \
        "'Detail Information')]/../following-sibling::h:table",
        namespaces=NSMAP, extensions=extensions)
    narrative_fields = {'author_name': (q('User Name:'), fix_name),
                        'date': (q('Date:'), narrative_date),
                        'further_action': (q('Further Action Required:'),
                                           unicode),
                        'opi': (q('O.P.I.:'), unicode),
                        'narrative_text': (q('Narrative:'), unicode)}
    for narrative_part in grouper(5, narrative_parts):
        narrative_data = extractor(narrative_part,
                                   narrative_fields)
        report_data['narrative'].append(narrative_data)
    # Aircraft entries use the same table layout, 9 tables per aircraft.
    aircraft_parts = report.xpath(
        ".//h:fieldset/h:legend/h:strong[contains(text()," \
        "'Aircraft Information')]/../following-sibling::h:table",
        namespaces=NSMAP, extensions=extensions)
    aircraft_fields = {'flight_number': (q('Flight #:'), unicode),
                       'category': (q('Aircraft Category:'), unicode),
                       'reg_country': (q('Country of Registration:'), unicode),
                       'make': (q('Make:'), unicode),
                       'model': (q('Model:'), unicode),
                       'year': (q('Year Built:'), safe_int),
                       'amateur_built': (q('Amateur Built:'), unicode),
                       'engine_make': (q('Engine Make:'), unicode),
                       'engine_model': (q('Engine Model:'), unicode),
                       'engine_type': (q('Engine Type:'), unicode),
                       'gear_type': (q('Gear Type:'), unicode),
                       'flight_phase': (q('Phase of Flight:'), unicode),
                       'damage': (q('Damage:'), unicode),
                       'owner': (q('Owner:'), unicode),
                       'operator': (q('Operator:'), unicode),
                       'operator_type': (q('Operator Type:'), unicode)}
    for aircraft_part in grouper(9, aircraft_parts):
        aircraft_data = extractor(aircraft_part,
                                  aircraft_fields)
        report_data['aircraft'].append(aircraft_data)
    #All of the extraction is done; on to formatting and cleanup.
    #(which is no longer done here)
    return report_data
| {
"content_hash": "5d406bb4874a8d79cc34112722277300",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 79,
"avg_line_length": 37.88,
"alnum_prop": 0.5402021421028813,
"repo_name": "kurtraschke/cadors-parse",
"id": "d8cf5a9fcaf9a876266e5bec6a217438ca1fb80a",
"size": "6629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cadorsfeed/cadorslib/parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3038"
},
{
"name": "Python",
"bytes": "77077"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView, View
from dash.access import (df, hostname, ip, issue, mem, numberofcores, w, ps,
whereis, users, boot, loadavg, bandwidth, dnsmasq_leases,
ping, date)
from dash.utils import json_response
class IndexView(TemplateView):
    # Serves the dashboard's single-page HTML shell.
    template_name = "dash/index.html"
def base(cls, func):
    """Create a View subclass named *cls* whose GET handler returns the
    JSON-serialised result of calling *func* with no arguments."""
    def get(request, *args, **kwargs):
        # Request data is deliberately ignored; the metric helpers take
        # no arguments.
        return func()
    # Apply the decorator explicitly rather than with @-syntax.
    return type(cls, (View, ), {'get': json_response(get)})
# One JSON endpoint per system metric: each generated view simply
# serialises the output of the matching dash.access helper.
DfView = base('DfView', df)
HostnameView = base('HostnameView', hostname)
ExternalipView = base('ExternalipView', ip)
IssueView = base('IssueView', issue)
MemoryView = base('MemoryView', mem)
WhoView = base('WhoView', w)
NumcpuView = base('NumcpuView', numberofcores)
PsView = base('PsView', ps)
WhereisView = base('WhereisView', whereis)
UsersView = base('UsersView', users)
BootView = base('BootView', boot)
LoadavgView = base('LoadavgView', loadavg)
BandwidthView = base('BandwidthView', bandwidth)
DnsmasqView = base('DnsmasqView', dnsmasq_leases)
PingView = base('PingView', ping)
TimeView = base('TimeView', date)
| {
"content_hash": "bc7d807f11d6ccb3de0fca6add001717",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 32.85294117647059,
"alnum_prop": 0.6947179946284691,
"repo_name": "dongweiming/django-linux-dash",
"id": "1b3e268c6c3fbe64b11f8b73f136b65cb67ddb44",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dash/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74441"
},
{
"name": "JavaScript",
"bytes": "20003"
},
{
"name": "Python",
"bytes": "19536"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes Participant.starting_location
    # optional (blank/null allowed) while keeping CASCADE on delete.

    dependencies = [
        ('halfwayapp', '0003_auto_20160222_2252'),
    ]

    operations = [
        migrations.AlterField(
            model_name='participant',
            name='starting_location',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='halfwayapp.Address'),
        ),
    ]
| {
"content_hash": "8459604976d9b41795cb383ef438835f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 129,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.6522593320235757,
"repo_name": "cszc/meethalfway",
"id": "ecc042bf17dc7485d553b06dde69773fc0c42bf8",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangohalfway/halfwayapp/migrations/0004_auto_20160222_2253.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7663"
},
{
"name": "Python",
"bytes": "34804"
}
],
"symlink_target": ""
} |
import BBB_QC # Written libraries for the Quadcopter
import Adafruit_BBIO.GPIO as GPIO # Adafruit GPIO libraries
import Adafruit_BBIO.PWM as PWM # Adafruit PWM libraries
import sys # Libraries for system nature jobs
import time # For times and delays
import signal # detects inputs/interupts
import numpy # for Matlab style jobs
############### Global Variables ###############
############### Functions ###############
# Included to ensure a clean exit
def Control_C_Exit(signal, frame):
    """SIGINT handler: release the GPIO and PWM pins, report, and exit.

    Note: the first parameter shadows the module-level ``signal`` import,
    but the handler never uses it, so this is harmless.
    """
    GPIO.cleanup()
    PWM.cleanup()
    print("\nProgram halted! Exiting program!")
    sys.exit()
############### And so it begins ###############
signal.signal(signal.SIGINT, Control_C_Exit) # For cleaning up mid run

BBB_QC.Motor_Throttle_Initialize()

# One row per throttle percentage (0-100), one column per sample.
Motor_Set_1 = numpy.zeros((101,100))
Motor_Set_2 = numpy.zeros((101,100))
Motor_Set_3 = numpy.zeros((101,100))
Motor_Set_4 = numpy.zeros((101,100))

# print() call form (single argument) works on Python 2 and 3 alike;
# the typo "approximatly" in the original message is also fixed here.
print("Beginning calibration.")
print("Time to complete: approximately 4.2 minutes\n")

for Throttle in range(15,101):
    print("Throttle set to {0}%.".format(Throttle))
    # NOTE(review): only motor 1 is throttled although frequency data is
    # recorded for all four motors below — confirm whether
    # Motor_All_Throttle(Throttle) was intended here.
    BBB_QC.Motor_1_Throttle(Throttle)
    time.sleep(1)
    # Delay here was placed to allow the motors to reach steadystate
    # conditions
    for Datapoint in range(15,100):
        Motor_Set_1[Throttle,Datapoint] = BBB_QC.Motor_1_Frequency
        Motor_Set_2[Throttle,Datapoint] = BBB_QC.Motor_2_Frequency
        Motor_Set_3[Throttle,Datapoint] = BBB_QC.Motor_3_Frequency
        Motor_Set_4[Throttle,Datapoint] = BBB_QC.Motor_4_Frequency
        time.sleep(0.001)

BBB_QC.Motor_All_Throttle(0)

# "compleate" typo in the user-facing message fixed.
print("Motor calibration data collection complete")
print("Analyzing data...")

# Per-throttle mean and standard deviation across the recorded samples.
Motor_1_Mean = numpy.mean(Motor_Set_1,1)
Motor_2_Mean = numpy.mean(Motor_Set_2,1)
Motor_3_Mean = numpy.mean(Motor_Set_3,1)
Motor_4_Mean = numpy.mean(Motor_Set_4,1)

Motor_1_Std = numpy.std(Motor_Set_1,1)
Motor_2_Std = numpy.std(Motor_Set_2,1)
Motor_3_Std = numpy.std(Motor_Set_3,1)
Motor_4_Std = numpy.std(Motor_Set_4,1)

print(Motor_1_Mean)
print(Motor_1_Std)

BBB_QC.Motor_All_Throttle(0)
print("\nProgram halted! Exiting program!")
sys.exit()
| {
"content_hash": "57cdb6adcbf41114c5112dffb420a807",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 70,
"avg_line_length": 28.534246575342465,
"alnum_prop": 0.6908305328852616,
"repo_name": "ValRose/BBB_Quadcopter",
"id": "61ad164078a7ef2a214a227946b32815d94e6891",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Flight_Program/BBB_QC_Motor_Calibration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7121885"
},
{
"name": "C++",
"bytes": "14290"
},
{
"name": "Groff",
"bytes": "3367"
},
{
"name": "Makefile",
"bytes": "154554"
},
{
"name": "NSIS",
"bytes": "3383"
},
{
"name": "Objective-C",
"bytes": "21894"
},
{
"name": "Perl",
"bytes": "1575"
},
{
"name": "Python",
"bytes": "43458"
},
{
"name": "Shell",
"bytes": "6310"
}
],
"symlink_target": ""
} |
from carepoint import Carepoint
from sqlalchemy import (Column,
Integer,
Numeric,
ForeignKey,
DateTime,
String,
)
class AccountActivity(Carepoint.BASE):
    """SQLAlchemy model for the ``CsSArActivity`` table in the ``cph``
    database: one row per accounts-receivable activity entry."""
    __tablename__ = 'CsSArActivity'
    __dbname__ = 'cph'

    # Primary key is assigned externally, not auto-incremented.
    id = Column(
        Integer,
        primary_key=True,
        autoincrement=False,
    )
    # Foreign keys linking the activity to its account, order, patient,
    # dispense, organisations, item and store records.
    acct_id = Column(
        Integer,
        ForeignKey('cp_acct.ID'),
    )
    order_id = Column(
        Integer,
        ForeignKey('CsOm.order_id'),
    )
    pat_id = Column(
        Integer,
        ForeignKey('cppat.pat_id'),
    )
    rxdisp_id = Column(
        Integer,
        ForeignKey('cprx_disp.rxdisp_id'),
    )
    org_id = Column(
        Integer,
        ForeignKey('csorg.org_id'),
    )
    orig_org_id = Column(
        Integer,
        ForeignKey('csorg.org_id'),
    )
    item_id = Column(
        Integer,
        ForeignKey('item.item_id'),
    )
    # Activity detail columns. The *_cn / ct_id / code_num values are
    # CarePoint enumeration codes — semantics not visible here; confirm
    # against the CarePoint schema documentation.
    descr = Column(String)
    refno = Column(String)
    ct_id = Column(Integer)
    code_num = Column(Integer)
    amt = Column(Numeric)
    reason_cn = Column(Integer)
    rpt_cat = Column(Integer)
    qty = Column(Numeric)
    tax = Column(Numeric)
    nonrx_order_line_id = Column(Integer)
    store_id = Column(
        Integer,
        ForeignKey('csstore.store_id'),
    )
    batch_id = Column(Integer)
    batch_no = Column(String)
    # Audit columns: who created/last changed the row and when.
    add_user_id = Column(
        Integer,
        ForeignKey('csuser.user_id'),
    )
    add_date = Column(DateTime)
    chg_user_id = Column(
        Integer,
        ForeignKey('csuser.user_id'),
    )
    chg_date = Column(DateTime)
| {
"content_hash": "3dee62337708ccbc96cd2a435ff4565f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 42,
"avg_line_length": 23.45205479452055,
"alnum_prop": 0.5175233644859814,
"repo_name": "laslabs/Python-Carepoint",
"id": "5c8cc6be1deda3fb5a0a598f3984638965c2f13d",
"size": "1826",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/SMD-319-enum-image_type_cn",
"path": "carepoint/models/cph/account_activity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165871"
}
],
"symlink_target": ""
} |
import cassiopeia.dto.requests
import cassiopeia.type.dto.stats
def get_ranked_stats(summoner_id, season=None):
    """https://developer.riotgames.com/api/methods#!/1018/3452

    summoner_id    int            the summoner to get ranked stats for
    season         str            the season to get ranked stats for ("SEASON2015", "SEASON2014", "SEASON3") (default None)

    return         RankedStats    the ranked stats for the summoner and season specified
    """
    url = "{version}/stats/by-summoner/{id_}/ranked".format(
        version=cassiopeia.dto.requests.api_versions["stats"],
        id_=summoner_id)
    # Only pass the season filter when one was requested.
    params = {"season": season} if season else {}
    response = cassiopeia.dto.requests.get(url, params)
    return cassiopeia.type.dto.stats.RankedStats(response)
def get_stats(summoner_id, season=None):
    """https://developer.riotgames.com/api/methods#!/1018/3453

    summoner_id    int                       the summoner to get stats summaries for
    season         str                       the season to get stats for ("SEASON2015", "SEASON2014", "SEASON3") (default None)

    return         PlayerStatsSummaryList    the stats summaries for the summoner and season specified
    """
    url = "{version}/stats/by-summoner/{id_}/summary".format(
        version=cassiopeia.dto.requests.api_versions["stats"],
        id_=summoner_id)
    # Only pass the season filter when one was requested.
    params = {"season": season} if season else {}
    response = cassiopeia.dto.requests.get(url, params)
    return cassiopeia.type.dto.stats.PlayerStatsSummaryList(response)
| {
"content_hash": "9a1bc0d8940a3f09a1efc0854c71830b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 136,
"avg_line_length": 43,
"alnum_prop": 0.6524547803617571,
"repo_name": "MakersF/cassiopeia",
"id": "3b303dc075fd438ff9f8e3977cad0f2a937679ce",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cassiopeia/dto/statsapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "602362"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from setuptools import setup
setup(
    name='Chong',
    version='0.1dev',
    author='Jeff Bradberry',
    author_email='jeff.bradberry@gmail.com',
    packages=['chong'],
    # Registers the Board class with the jrb_board framework so the game
    # is discoverable via the 'jrb_board.games' entry-point group.
    entry_points={
        'jrb_board.games': 'chong = chong.chong:Board',
    },
    install_requires=['six'],
    license='LICENSE',
    description="An implementation of the board game Chong.",
)
| {
"content_hash": "5450362ac1f5002037c22e8a6777d789",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 25.8125,
"alnum_prop": 0.6464891041162227,
"repo_name": "jbradberry/chong",
"id": "8b9593c2d913053396f0f671deffbfe6caa8851d",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49983"
}
],
"symlink_target": ""
} |
import errno
import os
def check_pid(*filenames):
    """Return the PID from the first existing pidfile whose process is
    still alive, or False when no pidfile exists or the process is gone.

    ``os.kill(pid, 0)`` performs an existence check only; OSErrors other
    than ESRCH (e.g. EPERM) propagate unchanged, as do pidfiles with
    non-integer contents (ValueError).
    """
    try:
        for filename in filenames:
            if not os.path.exists(filename):
                continue
            with open(filename) as f:
                pid = int(f.read().strip())
            break
        else:
            # No pidfile found at all.
            return False
        os.kill(pid, 0)
    except OSError as e:
        # 'except OSError as e' replaces the Python-2-only
        # 'except OSError, e' syntax; behaviour is identical.
        if e.errno == errno.ESRCH:
            return False
        raise
    else:
        return pid
"content_hash": "d0fd6357947061323b64ada8df46881b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 44,
"avg_line_length": 22.85,
"alnum_prop": 0.487964989059081,
"repo_name": "dataflow/DataStage",
"id": "f9048581c2c15061c85e112ab308d8fe7f8e9f86",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datastage/admin/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36092"
},
{
"name": "JavaScript",
"bytes": "3586"
},
{
"name": "Python",
"bytes": "755565"
},
{
"name": "Shell",
"bytes": "60793"
}
],
"symlink_target": ""
} |
import telebot
import adminList as Admin
from sqldata import db
import keys
import keyboards
from loadSchedule import load_schedule
from replacement import Replacement
import json
import datetime
from session import Users, AntiCrash
from const import HUD
import messageGroup
from notification_group import send_replacements_to_subscribers
import flask_app
import pdfkit
Admin.init(db)
bot = telebot.TeleBot(keys.DISPATCHER_TOKEN)
bot_2 = telebot.TeleBot(keys.SUBSCRIBER_TOKEN)
@bot.message_handler(commands=["start"])
def initilization(message):
    """/start handler: greet the chat with the keyboard matching its
    rights (admin menu for admins, plain user menu otherwise)."""
    chat_id = message.chat.id
    AntiCrash(chat_id)
    if Admin.isAdmin(chat_id):
        markup = keyboards.admin_menu()
    else:
        markup = keyboards.user_menu()
    bot.send_message(message.chat.id, HUD.START, reply_markup=markup)
@bot.message_handler(commands=["help"])
def helping(message):
    """/help handler: reply with the static help text."""
    chat_id = message.chat.id
    bot.send_message(chat_id, HUD.HELP)
@bot.message_handler(commands=["myid"])
def myid(message):
    """/myid handler: echo the numeric chat id back to the sender."""
    chat_id = message.chat.id
    bot.send_message(chat_id, chat_id)
@bot.message_handler(content_types=["document"])
def load_sc(message):
    """Document handler: accept a schedule file only while the sender is
    in file-upload mode; otherwise point them at the help text."""
    chat_id = message.chat.id
    AntiCrash(chat_id)
    if Users[chat_id].Action != HUD.ACTION_LOADFILE:
        bot.send_message(chat_id, HUD.HELP_INFO)
        return
    load_schedule(message, bot, db)
@bot.message_handler(content_types=["text"])
def msg_handler(message):
    """Main text dispatcher: handles admin authentication and every
    admin-menu button (broadcast, group message, file upload, replacement
    editing, publishing and PDF export)."""
    text = message.text
    chat_id = message.chat.id
    AntiCrash(chat_id)
    rep = Users[message.chat.id].replacements
    # Authentication flow is available to everyone.
    if (text == HUD.BUTTON_AUTH):
        bot.send_message(chat_id, HUD.AUTH)
        return
    elif (text == HUD.PASSWORD):
        Admin.newAdmin(db, chat_id)
        markup = keyboards.admin_menu()
        bot.send_message(chat_id, HUD.AUTH_SUCCESS, reply_markup=markup)
        return
    # Everything below is admin-only.
    if not (Admin.isAdmin(chat_id)):
        bot.send_message(chat_id, HUD.HELP_INFO)
        return
    if (text == HUD.BUTTON_MESSAGE):
        Users[chat_id].Action = HUD.ACTION_MESSAGE
        bot.send_message(chat_id, HUD.SEND_MSG)
    elif (text == HUD.BUTTON_MESSAGE_GROUP):
        Users[chat_id].Action = HUD.ACTION_MESSAGE_GROUP
        markup = keyboards.group_message_menu(messageGroup.getGroups())
        bot.send_message(chat_id, "Выберете группу", reply_markup=markup)
    elif (text == HUD.BUTTON_LOADFILE):
        Users[chat_id].Action = HUD.ACTION_LOADFILE
        bot.send_message(chat_id, HUD.LOADFILE)
    elif (text == HUD.BUTTON_EXIT):
        # Revoke admin rights and restore the plain-user keyboard.
        Admin.remAdmin(db, message.chat.id)
        markup = keyboards.user_menu()
        bot.send_message(message.chat.id, HUD.DISCONNECT, reply_markup=markup)
    elif (text == HUD.BUTTON_REPLACEMENT):
        # Start a fresh replacement-editing session for this admin.
        rep = Replacement()
        Users[chat_id].replacements = rep
        markup = keyboards.replacement_menu(0, rep)
        bot.send_message(message.chat.id, "Выберете курс", reply_markup=markup)
    elif text == HUD.BUTTON_PUBLISH_REPLACEMENTS:
        send_replacements_to_subscribers()
        bot.send_message(chat_id, "Уведомления отправлены")
    elif (Users[chat_id].Action == HUD.ACTION_MESSAGE):
        # Broadcast mode: "отм" cancels; anything else goes to all users.
        if (text == "отм"):
            Users[chat_id].Action = 0
            return
        send_msg = message.chat.first_name + " " + message.chat.last_name + ": " + text
        for u in Users:
            try:
                bot_2.send_message(u, send_msg)
            except:
                # NOTE(review): bare except silently skips chats the bot
                # cannot message (e.g. blocked users) — consider logging.
                pass
        Users[chat_id].Action = 0
        return
    elif (Users[chat_id].Action == HUD.ACTION_MESSAGE_GROUP_TYPING):
        if (text == "отм"):
            Users[chat_id].Action = 0
            return
        send_msg = message.chat.first_name + " " + message.chat.last_name + ": " + text
        messageGroup.sendGroupMessage(bot_2, send_msg, Users[chat_id].message_group)
        Users[chat_id].Action = 0
    elif text == HUD.BUTTON_EXPORT_REPLACEMENT:
        # Render today's and tomorrow's replacement pages to PDF and send
        # both documents to the requesting admin.
        with flask_app.app.app_context():
            today = datetime.datetime.now().date()
            today_repacment = flask_app.replacements_today()
            file_name = '{}/замены-{}.pdf'.format(keys.PATH_TO_PDF_FILES, today.isoformat())
            pdfkit.from_string(today_repacment, file_name)
            doc = open(file_name, 'rb')
            bot.send_document(chat_id, doc)
            tomorrow = datetime.datetime.now().date() + datetime.timedelta(days=1)
            tomorrow_repacment = flask_app.replacements_tomorrow()
            # NOTE(review): no '/' after PATH_TO_PDF_FILES here, unlike
            # the "today" branch above — one of the two is likely wrong;
            # confirm whether PATH_TO_PDF_FILES ends with a slash.
            file_name = '{}замены-{}.pdf'.format(keys.PATH_TO_PDF_FILES, tomorrow.isoformat())
            pdfkit.from_string(tomorrow_repacment, file_name)
            doc = open(file_name, 'rb')
            bot.send_document(chat_id, doc)
    elif rep != 0 and rep.is_typing_room():
        # Admin is typing a room number for the replacement editor.
        rep.setRoom(text)
        markup = keyboards.replacement_menu(rep.state, rep)
        bot.send_message(message.chat.id, rep.getText(), reply_markup=markup)
    else:
        bot.send_message(chat_id, HUD.HELP_INFO)
@bot.callback_query_handler(func=lambda res: True)
def cllbck(res):
    """Inline-keyboard callback dispatcher: either routes a group-message
    selection or forwards the action to the replacement editor."""
    # Callback payload is JSON with positional keys '0' (action) and
    # '1' (value); keep the decoded form on res.data as before.
    res.data = json.loads(res.data)
    key, value = res.data['0'], res.data['1']
    message = res.message
    if key == "message_group":
        session = Users[message.chat.id]
        session.Action = HUD.ACTION_MESSAGE_GROUP_TYPING
        session.message_group = value
        bot.send_message(message.chat.id, HUD.SEND_MSG)
        return
    rep = Users[message.chat.id].replacements
    if rep == 0:
        # No replacement-editing session in progress for this chat.
        return
    state = rep.action(key, value)
    markup = keyboards.replacement_menu(state, rep)
    if markup == 0:
        bot.send_message(message.chat.id, rep.getText())
    else:
        bot.send_message(message.chat.id, rep.getText(), reply_markup=markup)
if __name__ == '__main__':
    # When the Telegram logger is enabled, mirror warnings and fatal
    # polling errors to the logging channel; otherwise just poll.
    if not keys.TELERGRAM_LOGGER_ENABLED:
        bot.polling(none_stop=True, interval=0)
    else:
        import logging
        import telegram_logger
        import traceback
        logger = logging.getLogger()
        telegram_handle = telegram_logger.TelegramHandler()
        telegram_handle.setLevel(logging.WARNING)
        logger.addHandler(telegram_handle)
        logger.warning('Бот диспетчера запушен')
        try:
            bot.polling(none_stop=True, interval=0)
        except Exception as e:
            # Forward the full traceback to the Telegram logger channel.
            telegram_logger.send_msg_to_tele_logger(traceback.format_exc())
| {
"content_hash": "9197a976e9130b45ac4800485ebe78cf",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 94,
"avg_line_length": 36.21666666666667,
"alnum_prop": 0.6217211228716061,
"repo_name": "uksivt-guys/schedule-bot",
"id": "834eaa4e8cdf69f886ec8e95d9adb2c22d42d523",
"size": "6629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dispetcher_bot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "75119"
}
],
"symlink_target": ""
} |
import wx
from ui.custom_checkbox import CustomCheckBox
class CustomMenuBar(wx.Panel):
    """Custom title bar for a frameless window: shows the app title and
    server label plus pin/settings/close buttons, and lets the user drag
    the parent window by grabbing the bar."""

    def __init__(self, parent, *args, **kwargs):
        wx.Panel.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # Inherit the parent's colours and font so the bar blends in.
        self.SetBackgroundColour(self.parent.GetBackgroundColour())
        self.SetForegroundColour(self.parent.GetForegroundColour())
        self.SetFont(self.parent.GetFont())
        # NOTE(review): img_size is not referenced in this class —
        # presumably consumed elsewhere (icon sizing); confirm.
        self.img_size = 12
        # Last mouse position while dragging, or None when not dragging.
        self._dragPos = None
        self.Bind(wx.EVT_MOTION, self.OnMouse)

        gbSizer = wx.GridBagSizer()

        self.txtTitle = wx.StaticText(self, wx.ID_ANY, u"Tera DPS ", wx.DefaultPosition, wx.DefaultSize, 0)
        gbSizer.Add(self.txtTitle, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)

        # Server label sits in the growable middle column (see below).
        self.txtServer = wx.StaticText(self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0)
        gbSizer.Add(self.txtServer, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_HORIZONTAL , 5)

        # Toolbar buttons: pin window, open settings, close application.
        self.btn_pin = CustomCheckBox(self, 'ui.pin', color_checked='#FF0000', color_hover='#1188FF')
        self.btn_pin.Bind(wx.EVT_CHECKBOX, self.parent.TogglePin)
        gbSizer.Add(self.btn_pin, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 6)

        self.btn_config = CustomCheckBox(self, 'ui.settings', color_checked='#FF0000', color_hover='#1188FF')
        self.btn_config.Bind(wx.EVT_CHECKBOX, self.parent.ToggleConfig)
        gbSizer.Add(self.btn_config, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 6)

        self.btn_close = CustomCheckBox(self, 'ui.close', color_hover='#1188FF')
        self.btn_close.Bind(wx.EVT_CHECKBOX, self.parent.OnClose)
        gbSizer.Add(self.btn_close, wx.GBPosition(0, 4), wx.GBSpan(1, 1), wx.ALL, 6)

        # Separator line under the whole bar (spans all 5 columns).
        self.line1 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        gbSizer.Add(self.line1, wx.GBPosition(1, 0), wx.GBSpan(1, 5), wx.EXPAND | wx.ALL, 0)

        gbSizer.AddGrowableCol(1)
        self.SetSizer(gbSizer)

    def OnMouse(self, event):
        # Drag-to-move: while the left button is held, move the parent
        # window by the mouse displacement; on release, persist the new
        # window position to the config.
        if not event.Dragging():
            if self._dragPos:
                self.ReleaseMouse()
                x , y = self.parent.GetPosition()
                self.parent.config.WriteInt('x', x)
                self.parent.config.WriteInt('y', y)
            self._dragPos = None
            return
        if not self._dragPos:
            # Drag just started: capture the mouse and remember the
            # grab point.
            self.CaptureMouse()
            self._dragPos = event.GetPosition()
        else:
            pos = event.GetPosition()
            displacement = self._dragPos - pos
            self.parent.SetPosition(self.parent.GetPosition() - displacement)
| {
"content_hash": "a4fae958192b23c58b35d88047f341dd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 114,
"avg_line_length": 48.01851851851852,
"alnum_prop": 0.6247589664481296,
"repo_name": "jeff-alves/Tera",
"id": "1ddbbf4e83de3538866dc6bdcb84dbabb9905fa5",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/custom_menu_bar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113659"
}
],
"symlink_target": ""
} |
"""Django test runner that invokes nose.
You can use... ::
NOSE_ARGS = ['list', 'of', 'args']
in settings.py for arguments that you want always passed to nose.
"""
from __future__ import print_function, unicode_literals
import os
import sys
from optparse import make_option
from types import MethodType
import django.db.models.loading
from django.conf import settings
from django.core import exceptions
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.core.management.commands.loaddata import Command
from django.db import connections, transaction, DEFAULT_DB_ALIAS
try:
from django.db.backends.base.creation import BaseDatabaseCreation
except ImportError:
# Django < 1.7
from django.db.backends.creation import BaseDatabaseCreation
try:
from importlib import import_module
except ImportError:
# Django < 1.7 and Python < 2.7
from django.utils.importlib import import_module
try:
from django.apps import apps
except ImportError:
# Django < 1.7
from django.db.models.loading import cache as apps
import nose.core
from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin, TestReorderer
from django_nose.utils import uses_mysql
# Use DiscoverRunner if available, as DjangoTestSuiteRunner is deprecated in 1.7
try:
from django.test.runner import DiscoverRunner as BaseRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner as BaseRunner
try:
    # Python >= 2.5 ships ``any`` as a builtin; probe for it by name.
    any
except NameError:
    # Pure-Python fallback for ancient interpreters.
    def any(iterable):
        """Return True if any item of *iterable* is truthy."""
        for item in iterable:
            if item:
                return True
        return False
__all__ = ('BasicNoseRunner', 'NoseTestSuiteRunner')
# Mapping from Django's "manage.py test" option names to the nosetests
# options that correspond to them under a different name.
OPTION_TRANSLATION = {'--failfast': '-x',
                      '--nose-verbosity': '--verbosity'}


def translate_option(opt):
    """Return the nosetests spelling of a Django command-line option.

    "--long=value" forms are split so only the flag part is translated;
    the value is passed through untouched.
    """
    if '=' not in opt:
        return OPTION_TRANSLATION.get(opt, opt)
    flag, _, value = opt.partition('=')
    return '%s=%s' % (translate_option(flag), value)
# Django v1.2 predates _get_test_db_name(); monkeypatch an equivalent onto
# BaseDatabaseCreation so the rest of this module can rely on it.
if not hasattr(BaseDatabaseCreation, '_get_test_db_name'):
    def _get_test_db_name(self):
        """Return TEST_NAME when configured, else 'test_' + NAME."""
        settings_dict = self.connection.settings_dict
        if settings_dict['TEST_NAME']:
            return settings_dict['TEST_NAME']
        return 'test_' + settings_dict['NAME']
    BaseDatabaseCreation._get_test_db_name = _get_test_db_name
def _get_plugins_from_settings():
    """Yield instantiated nose plugins named in ``settings.NOSE_PLUGINS``.

    The django_nose TestReorderer plugin is always appended to the list.
    Raises ImproperlyConfigured for dotted paths that cannot be imported
    or resolved to a class.
    """
    plugin_paths = list(getattr(settings, 'NOSE_PLUGINS', []))
    plugin_paths.append('django_nose.plugin.TestReorderer')
    for plug_path in plugin_paths:
        module_name, sep, class_name = plug_path.rpartition('.')
        if not sep:
            # No dot at all: cannot be a "package.Class" path.
            raise exceptions.ImproperlyConfigured(
                "%s isn't a Nose plugin module" % plug_path)
        try:
            module = import_module(module_name)
        except ImportError as e:
            raise exceptions.ImproperlyConfigured(
                'Error importing Nose plugin module %s: "%s"' % (
                    module_name, e))
        try:
            plugin_class = getattr(module, class_name)
        except AttributeError:
            raise exceptions.ImproperlyConfigured(
                'Nose plugin module "%s" does not define a "%s"' %
                (module_name, class_name))
        yield plugin_class()
def _get_options():
    """Return all nose options that don't conflict with django options."""
    # Build a full nose config (env, config files, plugins from settings) so
    # plugin-contributed options are included in the parser we inspect.
    cfg_files = nose.core.all_config_files()
    manager = nose.core.DefaultPluginManager()
    config = nose.core.Config(env=os.environ, files=cfg_files, plugins=manager)
    config.plugins.addPlugins(list(_get_plugins_from_settings()))
    # Reaches into optparse internals; nose exposes no public API for this.
    options = config.getParser()._get_all_options()
    # copy nose's --verbosity option and rename to --nose-verbosity
    verbosity = [o for o in options if o.get_opt_string() == '--verbosity'][0]
    verbosity_attrs = dict((attr, getattr(verbosity, attr))
                           for attr in verbosity.ATTRS
                           if attr not in ('dest', 'metavar'))
    options.append(make_option('--nose-verbosity',
                               dest='nose_verbosity',
                               metavar='NOSE_VERBOSITY',
                               **verbosity_attrs))
    # Django 1.6 introduces a "--pattern" option, which is shortened into "-p"
    # do not allow "-p" to collide with nose's "--plugins" option.
    plugins_option = [
        o for o in options if o.get_opt_string() == '--plugins'][0]
    plugins_option._short_opts.remove('-p')
    # Drop any nose option whose dest collides with a Django option (or is
    # the help action) so the merged parser stays unambiguous.
    django_opts = [opt.dest for opt in BaseCommand.option_list] + ['version']
    return tuple(
        o for o in options if o.dest not in django_opts and o.action != 'help')
# ``class BaseRunner(BaseRunner)`` below deliberately shadows the imported
# runner with a subclass of itself, adding the argparse/optparse glue that
# merges nose's options into Django's test command.
if hasattr(BaseCommand, 'use_argparse'):
    # Django 1.8 and later uses argparse.ArgumentParser
    # Translate nose optparse arguments to argparse
    class BaseRunner(BaseRunner):
        # Don't pass the following options to nosetests
        django_opts = [
            '--noinput', '--liveserver', '-p', '--pattern', '--testrunner',
            '--settings']
        #
        # For optparse -> argparse conversion
        #
        # Option strings to remove from Django options if found
        _argparse_remove_options = (
            '-p',  # Short arg for nose's --plugins, not Django's --patterns
            '-d',  # Short arg for nose's --detailed-errors, not Django's
                   # --debug-sql
        )
        # Convert nose optparse options to argparse options
        _argparse_type = {
            'int': int,
            'float': float,
            'complex': complex,
            'string': str,
            'choice': str,
        }
        # If optparse has a None argument, omit from call to add_argument
        _argparse_omit_if_none = (
            'action', 'nargs', 'const', 'default', 'type', 'choices',
            'required', 'help', 'metavar', 'dest', 'callback', 'callback_args',
            'callback_kwargs')
        # Translating callbacks is not supported, because none of the built-in
        # plugins uses one. If you have a plugin that uses a callback, please
        # open a ticket or submit a working implementation.
        _argparse_fail_if_not_none = (
            'callback', 'callback_args', 'callback_kwargs')
        @classmethod
        def add_arguments(cls, parser):
            """Convert nose's optparse arguments to argparse."""
            # At call time the name BaseRunner refers to this subclass, so
            # super() correctly resolves to the imported Django runner.
            super(BaseRunner, cls).add_arguments(parser)
            # Read optparse options for nose and plugins
            cfg_files = nose.core.all_config_files()
            manager = nose.core.DefaultPluginManager()
            config = nose.core.Config(
                env=os.environ, files=cfg_files, plugins=manager)
            config.plugins.addPlugins(list(_get_plugins_from_settings()))
            options = config.getParser()._get_all_options()
            # Gather existing option strings
            django_options = set()
            for action in parser._actions:
                for override in cls._argparse_remove_options:
                    if override in action.option_strings:
                        # Emulate parser.conflict_handler='resolve'
                        parser._handle_conflict_resolve(
                            None, ((override, action),))
                django_options.update(action.option_strings)
            # Process nose optparse options
            for option in options:
                # Skip any options also in Django options
                opt_long = option.get_opt_string()
                if opt_long in django_options:
                    continue
                if option._short_opts:
                    opt_short = option._short_opts[0]
                    if opt_short in django_options:
                        continue
                else:
                    opt_short = None
                # Rename nose's --verbosity to --nose-verbosity
                if opt_long == '--verbosity':
                    opt_long = '--nose-verbosity'
                # Convert optparse attributes to argparse attributes
                option_attrs = {}
                for attr in option.ATTRS:
                    value = getattr(option, attr)
                    # Rename options for nose's --verbosity
                    if opt_long == '--nose-verbosity':
                        if attr == 'dest':
                            value = 'nose_verbosity'
                        elif attr == 'metavar':
                            value = 'NOSE_VERBOSITY'
                    # Omit arguments that are None, use default
                    if attr in cls._argparse_omit_if_none and value is None:
                        continue
                    # Translating callbacks is not supported
                    if attr in cls._argparse_fail_if_not_none:
                        assert value is None, (
                            'argparse option %s=%s is not supported' %
                            (attr, value))
                        continue
                    # Convert type from optparse string to argparse type
                    if attr == 'type':
                        value = cls._argparse_type[value]
                    # Pass converted attribute to optparse option
                    option_attrs[attr] = value
                # Add the optparse argument
                if opt_short:
                    parser.add_argument(opt_short, opt_long, **option_attrs)
                else:
                    parser.add_argument(opt_long, **option_attrs)
else:
    # Django 1.7 and earlier use optparse
    class BaseRunner(BaseRunner):
        # Replace the builtin options with the merged django/nose options:
        options = _get_options()
        # Don't pass the following options to nosetests
        django_opts = [
            '--noinput', '--liveserver', '-p', '--pattern', '--testrunner']
class BasicNoseRunner(BaseRunner):
    """Facade that implements a nose runner in the guise of a Django runner.
    You shouldn't have to use this directly unless the additions made by
    ``NoseTestSuiteRunner`` really bother you. They shouldn't, because they're
    all off by default.
    """
    # Tell nose not to collect this class itself as a test case.
    __test__ = False
    def run_suite(self, nose_argv):
        """Run the test suite through nose and return its TestResult."""
        # ResultPlugin captures the result object; exit=False keeps
        # TestProgram from calling sys.exit() so we can return it.
        result_plugin = ResultPlugin()
        plugins_to_add = [DjangoSetUpPlugin(self),
                          result_plugin,
                          TestReorderer()]
        for plugin in _get_plugins_from_settings():
            plugins_to_add.append(plugin)
        nose.core.TestProgram(argv=nose_argv, exit=False,
                              addplugins=plugins_to_add)
        return result_plugin.result
    def run_tests(self, test_labels, extra_tests=None):
        """Run the unit tests for all the test names in the provided list.
        Test names specified may be file or module names, and may optionally
        indicate the test case to run by separating the module or file name
        from the test case name with a colon. Filenames may be relative or
        absolute.
        N.B.: The test_labels argument *MUST* be a sequence of
        strings, *NOT* just a string object. (Or you will be
        specifying tests for each character in your string, and
        not the whole string.)
        Examples:
        runner.run_tests( ('test.module',) )
        runner.run_tests(['another.test:TestCase.test_method'])
        runner.run_tests(['a.test:TestCase'])
        runner.run_tests(['/path/to/test/file.py:test_function'])
        runner.run_tests( ('test.module', 'a.test:TestCase') )
        Note: the extra_tests argument is currently ignored. You can
        run old non-nose code that uses it without totally breaking,
        but the extra tests will not be run. Maybe later.
        Returns the number of tests that failed.
        """
        nose_argv = (['nosetests'] + list(test_labels))
        if hasattr(settings, 'NOSE_ARGS'):
            nose_argv.extend(settings.NOSE_ARGS)
        # Skip over 'manage.py test' and any arguments handled by django.
        django_opts = self.django_opts[:]
        for opt in BaseCommand.option_list:
            django_opts.extend(opt._long_opts)
            django_opts.extend(opt._short_opts)
        # Forward every remaining dashed argv option to nose, translating
        # names where Django and nose disagree (see OPTION_TRANSLATION).
        nose_argv.extend(
            translate_option(opt) for opt in sys.argv[1:]
            if opt.startswith('-') and
            not any(opt.startswith(d) for d in django_opts))
        # if --nose-verbosity was omitted, pass Django verbosity to nose
        if ('--verbosity' not in nose_argv and
            not any(opt.startswith('--verbosity=') for opt in nose_argv)):
            nose_argv.append('--verbosity=%s' % str(self.verbosity))
        if self.verbosity >= 1:
            print(' '.join(nose_argv))
        result = self.run_suite(nose_argv)
        # suite_result expects the suite as the first argument. Fake it.
        return self.suite_result({}, result)
# Keep a reference to the stock loaddata handler so the wrapper can call it.
_old_handle = Command.handle


def _foreign_key_ignoring_handle(self, *fixture_labels, **options):
    """Wrap the stock loaddata to ignore foreign key checks.

    This allows loading circular references from fixtures, and is
    monkeypatched into place in setup_databases().
    """
    using = options.get('database', DEFAULT_DB_ALIAS)
    commit = options.get('commit', True)
    connection = connections[using]
    # MySQL stinks at loading circular references:
    if uses_mysql(connection):
        connection.cursor().execute('SET foreign_key_checks = 0')
    _old_handle(self, *fixture_labels, **options)
    if uses_mysql(connection):
        connection.cursor().execute('SET foreign_key_checks = 1')
    if commit:
        connection.close()
def _skip_create_test_db(self, verbosity=1, autoclobber=False, serialize=True,
                         keepdb=True):
    """``create_test_db`` implementation that skips both creation and flushing.
    The idea is to re-use the perfectly good test DB already created by an
    earlier test run, cutting the time spent before any tests run from 5-13s
    (depending on your I/O luck) down to 3.
    """
    # Bound onto creation objects as ``create_test_db`` in
    # NoseTestSuiteRunner.setup_databases, so ``self`` is a creation object.
    # Notice that the DB supports transactions. Originally, this was done in
    # the method this overrides. The confirm method was added in Django v1.3
    # (https://code.djangoproject.com/ticket/12991) but removed in Django v1.5
    # (https://code.djangoproject.com/ticket/17760). In Django v1.5
    # supports_transactions is a cached property evaluated on access.
    if callable(getattr(self.connection.features, 'confirm', None)):
        # Django v1.3-4
        self.connection.features.confirm()
    elif hasattr(self, "_rollback_works"):
        # Django v1.2 and lower
        can_rollback = self._rollback_works()
        self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = can_rollback
    return self._get_test_db_name()
def _reusing_db():
"""Return whether the ``REUSE_DB`` flag was passed."""
return os.getenv('REUSE_DB', 'false').lower() in ('true', '1', '')
def _can_support_reuse_db(connection):
"""Return True if REUSE_DB is a sensible option for the backend."""
# Perhaps this is a SQLite in-memory DB. Those are created implicitly when
# you try to connect to them, so our usual test doesn't work.
return not connection.creation._get_test_db_name() == ':memory:'
def _should_create_database(connection):
    """Return whether we should (re)create the given DB.

    True when the backend cannot support reuse, when the DB doesn't exist
    yet, or when the REUSE_DB env var isn't truthy.
    """
    # TODO: Notice when the Model classes change and return True. Worst case,
    # we can generate sqlall and hash it, though it's a bit slow (2 secs) and
    # hits the DB for no good reason. Until we find a faster way, I'm inclined
    # to keep making people explicitly saying REUSE_DB if they want to reuse
    # the DB.
    if _can_support_reuse_db(connection):
        try:
            # Connecting tells us whether the DB already exists.
            connection.cursor()
        except Exception:  # TODO: Be more discerning but still DB agnostic.
            return True
        else:
            return not _reusing_db()
    return True
def _mysql_reset_sequences(style, connection):
"""Return a SQL statements needed to reset Django tables."""
tables = connection.introspection.django_table_names(only_existing=True)
flush_statements = connection.ops.sql_flush(
style, tables, connection.introspection.sequence_list())
# connection.ops.sequence_reset_sql() is not implemented for MySQL,
# and the base class just returns []. TODO: Implement it by pulling
# the relevant bits out of sql_flush().
return [s for s in flush_statements if s.startswith('ALTER')]
# Being overzealous and resetting the sequences on non-empty tables
# like django_content_type seems to be fine in MySQL: adding a row
# afterward does find the correct sequence number rather than
# crashing into an existing row.
class NoseTestSuiteRunner(BasicNoseRunner):
    """A runner that optionally skips DB creation.
    Monkeypatches connection.creation to let you skip creating databases if
    they already exist. Your tests will start up much faster.
    To opt into this behavior, set the environment variable ``REUSE_DB`` to
    something that isn't "0" or "false" (case insensitive).
    """
    def _get_models_for_connection(self, connection):
        """Return a list of models whose tables exist on this connection."""
        tables = connection.introspection.get_table_list(connection.cursor())
        return [m for m in apps.get_models() if
                m._meta.db_table in tables]
    def setup_databases(self):
        """Setup databases, skipping DB creation if requested and possible."""
        for alias in connections:
            connection = connections[alias]
            creation = connection.creation
            test_db_name = creation._get_test_db_name()
            # Mess with the DB name so other things operate on a test DB
            # rather than the real one. This is done in create_test_db when
            # we don't monkeypatch it away with _skip_create_test_db.
            orig_db_name = connection.settings_dict['NAME']
            connection.settings_dict['NAME'] = test_db_name
            if _should_create_database(connection):
                # We're not using _skip_create_test_db, so put the DB name
                # back:
                connection.settings_dict['NAME'] = orig_db_name
                # Since we replaced the connection with the test DB, closing
                # the connection will avoid pooling issues with SQLAlchemy. The
                # issue is trying to CREATE/DROP the test database using a
                # connection to a DB that was established with that test DB.
                # MySQLdb doesn't allow it, and SQLAlchemy attempts to reuse
                # the existing connection from its pool.
                connection.close()
            else:
                # Reusing the DB: reset auto-increment sequences instead of
                # recreating. Apparently, SUMO's tests are horrid and coupled
                # to certain numbers.
                cursor = connection.cursor()
                style = no_style()
                if uses_mysql(connection):
                    reset_statements = _mysql_reset_sequences(
                        style, connection)
                else:
                    reset_statements = connection.ops.sequence_reset_sql(
                        style, self._get_models_for_connection(connection))
                if hasattr(transaction, "atomic"):
                    with transaction.atomic(using=connection.alias):
                        for reset_statement in reset_statements:
                            cursor.execute(reset_statement)
                else:
                    # Django < 1.6
                    for reset_statement in reset_statements:
                        cursor.execute(reset_statement)
                    transaction.commit_unless_managed(using=connection.alias)
                # Each connection has its own creation object, so this affects
                # only a single connection:
                creation.create_test_db = MethodType(
                    _skip_create_test_db, creation)
        # Let loaddata tolerate circular foreign keys (see the wrapper above).
        Command.handle = _foreign_key_ignoring_handle
        # With our class patch, does nothing but return some connection
        # objects:
        return super(NoseTestSuiteRunner, self).setup_databases()
    def teardown_databases(self, *args, **kwargs):
        """Leave those poor, reusable databases alone if REUSE_DB is true."""
        if not _reusing_db():
            return super(NoseTestSuiteRunner, self).teardown_databases(
                *args, **kwargs)
        # else skip tearing down the DB so we can reuse it next time
| {
"content_hash": "ab30760a616fcdb738027cc1b21b14e7",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 80,
"avg_line_length": 39.13221601489758,
"alnum_prop": 0.6073570000951747,
"repo_name": "Deepomatic/django-nose",
"id": "66209f7a45de50c84b462e95f2ed1002f135379a",
"size": "21030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_nose/runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2030"
},
{
"name": "Python",
"bytes": "66815"
},
{
"name": "Shell",
"bytes": "4051"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
class FacebookProfile(models.Model):
    """Associates a Django user with a Facebook account (uid), per site."""
    user = models.ForeignKey(User)
    site = models.ForeignKey(Site, default=Site.objects.get_current)
    uid = models.CharField(max_length=255, blank=False, null=False)

    def __unicode__(self):
        # The related user row may have been deleted out from under us.
        try:
            label = u'%s: %s' % (self.user, self.uid)
        except User.DoesNotExist:
            label = u'None'
        return label

    def authenticate(self):
        """Authenticate via the backend that accepts a Facebook uid."""
        return authenticate(uid=self.uid)
class TwitterProfile(models.Model):
    """Associates a Django user with a Twitter account id, per site."""
    user = models.ForeignKey(User)
    site = models.ForeignKey(Site, default=Site.objects.get_current)
    twitter_id = models.PositiveIntegerField()

    def __unicode__(self):
        # The related user row may have been deleted out from under us.
        try:
            label = u'%s: %s' % (self.user, self.twitter_id)
        except User.DoesNotExist:
            label = u'None'
        return label

    def authenticate(self):
        """Authenticate via the backend that accepts a Twitter id."""
        return authenticate(twitter_id=self.twitter_id)
class OpenIDProfile(models.Model):
    """Associates a Django user with an OpenID identity, per site."""
    user = models.ForeignKey(User)
    site = models.ForeignKey(Site, default=Site.objects.get_current)
    identity = models.TextField()

    def __unicode__(self):
        # The related user row may have been deleted out from under us.
        try:
            label = 'OpenID profile for %s, via provider %s' % (
                self.user, self.identity)
        except User.DoesNotExist:
            label = 'OpenID profile for None, via provider None'
        return label

    def authenticate(self):
        """Authenticate via the backend that accepts an OpenID identity."""
        return authenticate(identity=self.identity)
class OpenIDStore(models.Model):
    # Persisted OpenID server-association state, scoped to a site.  The
    # field set (handle/secret/issued/lifetime/assoc_type) mirrors what an
    # OpenID association record carries.
    site = models.ForeignKey(Site, default=Site.objects.get_current)
    server_url = models.CharField(max_length=255)
    handle = models.CharField(max_length=255)
    secret = models.TextField()
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    assoc_type = models.TextField()
    def __unicode__(self):
        return u'OpenID Store %s for %s' % (self.server_url, self.site)
class OpenIDNonce(models.Model):
    # Replay-protection nonce record for OpenID authentication.
    server_url = models.CharField(max_length=255)
    timestamp = models.IntegerField()
    salt = models.CharField(max_length=255)
    date_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return u'OpenID Nonce for %s' % self.server_url
| {
"content_hash": "caaa438c71df9cb8dd648f1f1a1ddce6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 88,
"avg_line_length": 33.338235294117645,
"alnum_prop": 0.6749007498897222,
"repo_name": "0101/django-socialregistration",
"id": "9d692b798d7ad703aebb15ab00e769eae9aa6972",
"size": "2267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialregistration/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49646"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists, is_gzipped)
from bcbio.provenance import do
from bcbio import bam, utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
# Intermediate STAR output files; not referenced elsewhere in this module --
# presumably consumed by external cleanup code (TODO: confirm).
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
# SAM attributes STAR is asked to emit (passed to --outSAMattributes in align).
ALIGN_TAGS = ["NH", "HI", "NM", "MD", "AS"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
    """Align reads with STAR and return the path to the sorted BAM.

    Args:
        fastq_file: path to the first-read FASTQ (possibly gzipped).
        pair_file: path to the second-read FASTQ; falsy for single-end runs.
        ref_file: path to the STAR genome index directory.
        names: dict with read group values ("sample", "rg", "pl", "pu").
        align_dir: directory in which alignment output is written.
        data: sample dictionary carrying the pipeline configuration.

    Exits the process if no STAR index is available.
    """
    config = data["config"]
    out_prefix = os.path.join(align_dir, dd.get_lane(data))
    # NOTE(review): out_file is never used below -- candidate for removal.
    out_file = out_prefix + "Aligned.out.sam"
    out_dir = os.path.join(align_dir, "%s_star" % dd.get_lane(data))
    if not ref_file:
        logger.error("STAR index not found. We don't provide the STAR indexes "
                     "by default because they are very large. You can install "
                     "the index for your genome with: bcbio_nextgen.py upgrade "
                     "--aligners star --genomes genome-build-name --data")
        sys.exit(1)
    final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
    # Short-circuit when the alignment already exists from a previous run.
    if file_exists(final_out):
        return final_out
    star_path = config_utils.get_program("STAR", config)
    fastq = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
    num_cores = config["algorithm"].get("num_cores", 1)
    safe_makedir(align_dir)
    cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq} "
           "--runThreadN {num_cores} --outFileNamePrefix {out_prefix} "
           "--outReadsUnmapped Fastx --outFilterMultimapNmax 10 "
           "--outStd SAM "
           "--outSAMunmapped Within --outSAMattributes %s" % " ".join(ALIGN_TAGS))
    # Conditional expression covers the whole assignment: zcat is appended
    # only for gzipped input, otherwise cmd is left unchanged.
    cmd = cmd + " --readFilesCommand zcat " if is_gzipped(fastq_file) else cmd
    cmd += _read_group_option(names)
    fusion_mode = utils.get_in(data, ("config", "algorithm", "fusion_mode"), False)
    if fusion_mode:
        cmd += " --chimSegmentMin 15 --chimJunctionOverhangMin 15"
    strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
                                "unstranded").lower()
    if strandedness == "unstranded":
        cmd += " --outSAMstrandField intronMotif "
    if dd.get_rsem(data):
        # Also emit transcriptome-coordinate alignments for RSEM.
        cmd += " --quantMode TranscriptomeSAM "
    with tx_tmpdir(data) as tmp_dir:
        sam_to_bam = bam.sam_to_bam_stream_cmd(config)
        sort = bam.sort_cmd(config, tmp_dir)
        # Stream STAR's SAM stdout straight into BAM conversion and sorting.
        cmd += "| {sam_to_bam} | {sort} -o {tx_final_out} "
        run_message = "Running STAR aligner on %s and %s" % (fastq_file, ref_file)
        with file_transaction(data, final_out) as tx_final_out:
            do.run(cmd.format(**locals()), run_message, None)
    if dd.get_rsem(data):
        # Called for its side effect of moving the transcriptome BAM into
        # place; the return value is unused here.
        transcriptome_file = _move_transcriptome_file(out_dir, names)
    return final_out
def _move_transcriptome_file(out_dir, names):
    """Move STAR's transcriptome BAM next to the genome BAM; return its path."""
    dest = os.path.join(out_dir, "{0}.transcriptome.bam".format(names["sample"]))
    if not file_exists(dest):
        # STAR wrote the file one level up, prefixed with the lane name.
        src = os.path.join(out_dir, os.pardir,
                           "{0}Aligned.toTranscriptome.out.bam".format(names["lane"]))
        shutil.move(src, dest)
    return dest
def _read_group_option(names):
rg_id = names["rg"]
rg_sample = names["sample"]
rg_library = names["pl"]
rg_platform_unit = names["pu"]
return (" --outSAMattrRGline ID:{rg_id} PL:{rg_library} "
"PU:{rg_platform_unit} SM:{rg_sample} ").format(**locals())
def _get_quality_format(config):
qual_format = config["algorithm"].get("quality_format", None)
if qual_format.lower() == "illumina":
return "fastq-illumina"
elif qual_format.lower() == "solexa":
return "fastq-solexa"
else:
return "fastq-sanger"
def remap_index_fn(ref_file):
    """Map sequence references to equivalent star indexes.

    The star index lives in a "star" directory that is a sibling of the
    directory containing the reference file.
    """
    seq_dir = os.path.dirname(ref_file)
    genome_dir = os.path.dirname(seq_dir)
    return os.path.join(genome_dir, "star")
def index(ref_file, out_dir, data):
    """Create a STAR index in the defined reference directory.

    Requires a ref-transcripts.gtf in the sibling "rnaseq" directory of the
    reference; raises ValueError when it is missing.  The index is built in
    a transactional temp dir and moved into place on success.
    """
    (ref_dir, local_file) = os.path.split(ref_file)
    gtf_file = os.path.join(ref_dir, os.pardir, "rnaseq", "ref-transcripts.gtf")
    if not utils.file_exists(gtf_file):
        raise ValueError("%s not found, could not create a star index." % (gtf_file))
    if not utils.file_exists(out_dir):
        with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
            num_cores = dd.get_cores(data)
            cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
                   "--runThreadN {num_cores} "
                   "--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
            do.run(cmd.format(**locals()), "Index STAR")
            if os.path.exists(out_dir):
                # Another run may have created it meanwhile; replace it with
                # the freshly built index.
                shutil.rmtree(out_dir)
            shutil.move(tx_out_dir, out_dir)
    return out_dir
| {
"content_hash": "86d7e7abe137fabf87beca65740a6eb8",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 91,
"avg_line_length": 42.36206896551724,
"alnum_prop": 0.6202686202686203,
"repo_name": "SciLifeLab/bcbio-nextgen",
"id": "34eb5b2263504e4c38d048d5bb7ba3fff5129160",
"size": "4914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/ngsalign/star.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5592"
},
{
"name": "Python",
"bytes": "1166888"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "10019"
}
],
"symlink_target": ""
} |
"""This module defines an interface for finding named resources.
Due to license restrictions, not all software dependences can be shipped with
PerfKitBenchmarker.
Those that can be included in perfkitbenchmarker/data, or
perfkitbenchmarker/scripts and are loaded via a PackageResourceLoader.
Users can specify additional paths to search for required data files using the
`--data_search_paths` flag.
"""
import abc
import logging
import os
import shutil
from absl import flags
import perfkitbenchmarker
from perfkitbenchmarker import temp_dir
import pkg_resources
import six
FLAGS = flags.FLAGS
flags.DEFINE_multi_string('data_search_paths', ['.'],
                          'Additional paths to search for data files. '
                          'These paths will be searched prior to using files '
                          'bundled with PerfKitBenchmarker.')
# Subdirectory of the version-specific temp dir into which zipped package
# resources are extracted (see PackageResourceLoader.ResourcePath).
_RESOURCES = 'resources'
class ResourceNotFound(ValueError):
  """Error raised when a resource could not be found on the search path."""
  # The redundant ``pass`` was removed: a docstring already forms the body.
class ResourceLoader(six.with_metaclass(abc.ABCMeta, object)):
  """An interface for loading named resources.

  six.with_metaclass makes this an abstract base class on both Python 2
  and 3; concrete subclasses implement ResourceExists and ResourcePath.
  """
  @abc.abstractmethod
  def ResourceExists(self, name):
    """Checks for existence of the resource 'name'.
    Args:
      name: string. Name of the resource. Typically a file name.
    Returns:
      A boolean indicating whether the resource 'name' can be loaded by this
      object.
    """
    pass
  @abc.abstractmethod
  def ResourcePath(self, name):
    """Gets the path to the resource 'name'.
    Args:
      name: string. Name of the resource. Typically a file name.
    Returns:
      A full path to 'name' on the filesystem.
    Raises:
      ResourceNotFound: If 'name' was not found.
    """
    pass
class FileResourceLoader(ResourceLoader):
  """Loads resources from a directory in the filesystem.

  Attributes:
    path: string. Root path to load resources from.
  """

  def __init__(self, path):
    super().__init__()
    self.path = path
    if not os.path.isdir(path):
      # logging.warn is a deprecated alias; use logging.warning instead.
      logging.warning('File resource loader root %s is not a directory.', path)

  def __repr__(self):
    return '<{0} path="{1}">'.format(type(self).__name__, self.path)

  def _Join(self, *args):
    """Returns *args joined beneath the loader's root path."""
    return os.path.join(self.path, *args)

  def ResourceExists(self, name):
    """Checks whether 'name' exists beneath the root path."""
    return os.path.exists(self._Join(name))

  def ResourcePath(self, name):
    """Gets the filesystem path of resource 'name'.

    Raises:
      ResourceNotFound: If 'name' does not exist beneath the root path.
    """
    if not self.ResourceExists(name):
      raise ResourceNotFound(name)
    return self._Join(name)
class PackageResourceLoader(ResourceLoader):
  """Loads resources from a Python package.
  Attributes:
    package: string. Name of the package containing resources.
  """
  def __init__(self, package):
    super().__init__()
    self.package = package
  def __repr__(self):
    return '<{0} package="{1}">'.format(type(self).__name__, self.package)
  def ResourceExists(self, name):
    """Checks whether pkg_resources can see 'name' inside the package."""
    return pkg_resources.resource_exists(self.package, name)
  def ResourcePath(self, name):
    """Gets a filesystem path for resource 'name', extracting if needed.

    Raises:
      ResourceNotFound: If 'name' is not present in the package.
    """
    if not self.ResourceExists(name):
      raise ResourceNotFound(name)
    try:
      path = pkg_resources.resource_filename(self.package, name)
    except NotImplementedError:
      # This can happen if PerfKit Benchmarker is executed from a zip file.
      # Extract the resource to the version-specific temporary directory.
      path = os.path.join(temp_dir.GetVersionDirPath(), _RESOURCES, name)
      if not os.path.exists(path):
        dir_path = os.path.dirname(path)
        try:
          os.makedirs(dir_path)
        except OSError:
          # Another process may have created it concurrently; only re-raise
          # if the directory still doesn't exist.
          if not os.path.isdir(dir_path):
            raise
        with open(path, 'wb') as extracted_file:
          shutil.copyfileobj(pkg_resources.resource_stream(self.package, name),
                             extracted_file)
    return path
# Packages and in-tree directories that ship data, scripts, and configs with
# PerfKit Benchmarker.
DATA_PACKAGE_NAME = 'perfkitbenchmarker.data'
YCSB_WORKLOAD_DIR_NAME = os.path.join(
    os.path.dirname(perfkitbenchmarker.__file__), 'data/ycsb')
EDW_SCRIPT_DIR_NAME = os.path.join(
    os.path.dirname(perfkitbenchmarker.__file__), 'data/edw')
SCRIPT_PACKAGE_NAME = 'perfkitbenchmarker.scripts'
CONFIG_PACKAGE_NAME = 'perfkitbenchmarker.configs'
# Bundled-resource search order; consulted after any user-supplied
# --data_search_paths loaders (see _GetResourceLoaders).
DEFAULT_RESOURCE_LOADERS = [PackageResourceLoader(DATA_PACKAGE_NAME),
                            FileResourceLoader(YCSB_WORKLOAD_DIR_NAME),
                            FileResourceLoader(EDW_SCRIPT_DIR_NAME),
                            PackageResourceLoader(SCRIPT_PACKAGE_NAME),
                            PackageResourceLoader(CONFIG_PACKAGE_NAME)]
def _GetResourceLoaders():
  """Gets a list of registered ResourceLoaders.

  Returns:
    List of ResourceLoader instances. FileResourceLoaders for paths in
    FLAGS.data_search_paths will be listed first, followed by
    DEFAULT_RESOURCE_LOADERS.
  """
  if FLAGS['data_search_paths'].present:
    # Paths given explicitly on the command line are always added (invalid
    # ones trigger a warning inside FileResourceLoader).
    loaders = [FileResourceLoader(path) for path in FLAGS.data_search_paths]
  else:
    # The default search paths are only added when they exist.
    loaders = [FileResourceLoader(path) for path in FLAGS.data_search_paths
               if os.path.isdir(path)]
  loaders.extend(DEFAULT_RESOURCE_LOADERS)
  return loaders
def ResourcePath(resource_name, search_user_paths=True):
  """Gets the filename of a resource.

  Loaders are searched in order until the resource is found.
  If no loader provides 'resource_name', an exception is thrown.
  If 'search_user_paths' is true, the directories specified by
  "--data_search_paths" are consulted before the default paths.

  Args:
    resource_name: string. Name of a resource.
    search_user_paths: boolean. Whether paths from "--data_search_paths"
      should be searched before the default paths.

  Returns:
    A path to the resource on the filesystem.

  Raises:
    ResourceNotFound: When resource was not found.
  """
  loaders = (_GetResourceLoaders() if search_user_paths
             else DEFAULT_RESOURCE_LOADERS)
  for loader in loaders:
    if loader.ResourceExists(resource_name):
      return loader.ResourcePath(resource_name)
  raise ResourceNotFound(
      '{0} (Searched: {1})'.format(resource_name, loaders))
def ResourceExists(resource_name, search_user_paths=True):
  """Returns True if a resource exists.

  Loaders are searched in order until the resource is found; if none
  provides 'resource_name', returns False.  If 'search_user_paths' is
  true, the directories specified by "--data_search_paths" are consulted
  before the default paths.

  Args:
    resource_name: string. Name of a resource.
    search_user_paths: boolean. Whether paths from "--data_search_paths"
      should be searched before the default paths.

  Returns:
    Whether the resource exists.
  """
  try:
    ResourcePath(resource_name, search_user_paths)
  except ResourceNotFound:
    return False
  return True
| {
"content_hash": "170ab2ec822487aa0e9b685678bcacff",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 30.622222222222224,
"alnum_prop": 0.6882438316400581,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "d4f124bfaf6fbf17dd8397486324c7a9d1b12096",
"size": "7500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/data/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2012 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pywikibot
import robot
import mwparserfromhell
import tagger
# NOTE(review): this module-level tagger looks redundant -- RAUpdaterBot
# builds its own TaggerBot in __init__ -- but init() runs at import time;
# confirm nothing relies on that side effect before removing it.
TagBot = tagger.TaggerBot('WikiProject Requested articles')
TagBot.init()
class RAUpdaterBot(robot.Robot):
    """Removes completed entries from [[Wikipedia:Requested articles]] subpages.

    For every subpage linked from the main requested-articles page, each line
    is checked for a requested title (either a {{req}} template or, failing
    that, the line's first wikilink).  If the requested article already exists
    (and is neither a redirect nor a disambiguation page), the line is removed
    and the new article's talk page is tagged via TaggerBot.
    """

    def __init__(self):
        robot.Robot.__init__(self, task=26)
        self.TagBot = tagger.TaggerBot('WikiProject Requested articles')
        self.TagBot.init()
        # NOTE(review): starts at 11 rather than 0 -- presumably to account
        # for edits made before this run; confirm against the 50-edit budget
        # enforced in run().
        self.edit_count = 11

    def gen(self):
        """Yields 'Wikipedia:Requested articles/...' subpages to process."""
        page = pywikibot.Page(self.site, 'Wikipedia:Requested articles')
        # namespace 4 == the project ("Wikipedia:") namespace.
        links = page.linkedPages(namespaces=[4])
        for pg in links:
            if pg.title().startswith('Wikipedia:Requested articles/'):
                yield pg

    def parse_page(self, page):
        """Removes completed request lines from `page`.

        Returns True if the page was modified and saved, False otherwise.
        """
        print(page)
        content = page.get()
        lines = content.splitlines()
        delete_these = list()
        for item in lines:
            try:
                if self.parse_line(item):
                    delete_these.append(item)
            # Malformed titles and short/odd lines are simply left in place.
            except pywikibot.exceptions.InvalidTitle:
                pass
            except IndexError:
                pass
        for l in delete_these:
            del lines[lines.index(l)]
        newtext = '\n'.join(lines)
        pywikibot.showDiff(content, newtext)
        if content != newtext:
            page.put(newtext, 'Bot: removing completed requests. Want to help? Join the [[Wikipedia:WikiProject Requested articles/Backlog Drive|WikiProject Requested articles Backlog Drive]] today!')
            return True
        return False

    def parse_line(self, line):
        """Checks one request line; returns True if the request is fulfilled.

        A request is fulfilled when the article named by the line's {{req}}
        template (or its first wikilink) exists and is neither a redirect nor
        a disambiguation page.  Returns None for lines that cannot be parsed
        or whose article does not exist yet.  As a side effect, tags the
        article's talk page and bumps self.edit_count on success.
        """
        if not line or line.isspace():
            return
        elif line.startswith('=='):
            # Section heading, not a request.
            return
        name = None
        code = mwparserfromhell.parse(line)
        for template in code.filter_templates():
            if template.name.lower().strip() == 'req':
                name = template.get(1).value
                break
        if not name:
            # Fall back to the first wikilink on the line.
            key1 = line.find('[[')
            if key1 == -1:
                # No links at all -- nothing to check.
                return
            # BUG FIX: the original used line[key1+2] (a single character)
            # instead of a slice, so the ']]' search below always failed and
            # the wikilink fallback never matched anything.
            rest = line[key1 + 2:]
            key2 = rest.find(']]')
            if key2 == -1:
                # Unterminated link.
                return
            target = rest[:key2]
            name = target.split('|')[0]  # strip any pipe: "[[A|B]]" -> "A"
        pg = pywikibot.Page(self.site, name)
        print(pg)
        if pg.exists():
            if (not pg.isRedirectPage()) and (not pg.isDisambig()):
                print(pg.title() + ' exists!')
                talk = pg.toggleTalkPage()
                if self.TagBot.tag_page(talk):
                    self.edit_count += 1
                return True

    def run(self):
        """Processes subpages until exhausted or the 50-edit budget is hit."""
        self.updated = list()
        for page in self.gen():
            res = self.parse_page(page)
            if res:
                self.edit_count += 1
                self.updated.append(page)
            if self.edit_count >= 50:
                break
if __name__ == "__main__":
    # Run the updater; the log entry is written even if the run aborts.
    bot = RAUpdaterBot()
    try:
        bot.run()
    finally:
        log_page = pywikibot.Page(bot.site, 'User:Legobot/Logs/26')
        entries = ['* Updated [[%s]]' % page.title() for page in bot.updated]
        new_text = log_page.text + '\n' + '\n'.join(entries)
        log_page.put(new_text, 'Bot: Updating log in userspace')
| {
"content_hash": "71bb682aee95f8268e8298364008776a",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 200,
"avg_line_length": 34.6953125,
"alnum_prop": 0.5935600090069804,
"repo_name": "legoktm/pywikipedia-scripts",
"id": "a1b0d27477cc8f0a078db48fc419485883a72702",
"size": "4463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "req_article_cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "290594"
}
],
"symlink_target": ""
} |
"""
SQLAlchemy-i18n
---------------
Internationalization extension for SQLAlchemy models.
"""
import os
import re
import sys
from setuptools import setup
# NOTE(review): PY3 is unused in this file -- likely a leftover from the
# six-based py2/py3 era; confirm before removing.
PY3 = sys.version_info[0] == 3
# Absolute path of the directory containing this setup.py; used by
# get_version() to locate the package's __init__.py.
HERE = os.path.dirname(os.path.abspath(__file__))
def get_version():
    """Return the version string parsed from sqlalchemy_i18n/__init__.py."""
    init_path = os.path.join(HERE, 'sqlalchemy_i18n', '__init__.py')
    with open(init_path) as source:
        source_text = source.read()
    match = re.search(r"^__version__ = '(.*?)'$", source_text, re.MULTILINE)
    return match.group(1)
# Optional dependency groups, installable via
# `pip install SQLAlchemy-i18n[test]`.
extras_require = {
    'test': [
        'pytest>=2.2.3',
        'Pygments>=1.2',
        'Jinja2>=2.3',
        'docutils>=0.10',
        'flexmock>=0.9.7',
        'psycopg2>=2.4.6',  # tests run against PostgreSQL
        'flake8>=2.4.0',
        'isort>=3.9.6'
    ]
}
setup(
    name='SQLAlchemy-i18n',
    version=get_version(),
    url='https://github.com/kvesteri/sqlalchemy-i18n',
    license='BSD',
    author='Konsta Vesterinen',
    author_email='konsta@fastmonkeys.com',
    description='Internationalization extension for SQLAlchemy models.',
    # The module docstring at the top of this file doubles as the PyPI
    # long description.
    long_description=__doc__,
    packages=['sqlalchemy_i18n'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'SQLAlchemy>=1.3',
        'SQLAlchemy-Utils>=0.37.3',
        # NOTE(review): 'six' looks unnecessary now that python_requires is
        # '>=3.6' and only Python 3 classifiers are listed -- confirm the
        # package no longer imports six before dropping it.
        'six>=1.4.1'
    ],
    python_requires='>=3.6',
    extras_require=extras_require,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| {
"content_hash": "40bc2606d448e8729143709894c35530",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 26.48611111111111,
"alnum_prop": 0.5862611431567908,
"repo_name": "kvesteri/sqlalchemy-i18n",
"id": "9971cc06efd0dd2576f641f1aecade1b650c80a9",
"size": "1907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85844"
}
],
"symlink_target": ""
} |
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_clustering_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
# Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\)
# which is the square root of the sum of the absolute squares of the elements
# difference.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
COSINE_DISTANCE = 'cosine'
# Supported cluster-center initialization modes; see KMeans.__init__ for
# their semantics.
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
KMC2_INIT = 'kmc2'
# The name of the variable holding the cluster centers. Used by the Estimator.
CLUSTERS_VAR_NAME = 'clusters'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2,
kmc2_chain_length=200):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
iteration is complete, this second copy is copied back the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors. It is assumed that the
data points have been previously randomly permuted.
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if initial_clusters is a tensor or numpy array.
initial_clusters: Specifies the clusters used during initialization. One
of the following: - a tensor or numpy array with the initial cluster
centers. - a function f(inputs, k) that returns up to k centers from
`inputs`.
- "random": Choose centers randomly from `inputs`.
- "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
- "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
In the last three cases, one batch of `inputs` may not yield
`num_clusters` centers, in which case initialization will require
multiple batches until enough centers are chosen. In the case of
"random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
then the entire batch is chosen to be cluster centers.
distance_metric: Distance metric used for clustering. Supported options:
"squared_euclidean", "cosine".
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: Number of steps after which the updated
cluster centers are synced back to a master copy.
random_seed: Seed for PRNG used to initialize seeds.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
kmc2_chain_length: Determines how many candidate points are used by the
k-MC2 algorithm to produce one new cluster centers. If a (mini-)batch
contains less points, one new cluster center is generated from the
(mini-)batch.
Raises:
ValueError: An invalid argument was passed to initial_clusters or
distance_metric.
"""
initialization_algorithms = [RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT]
if isinstance(initial_clusters,
str) and initial_clusters not in initialization_algorithms:
raise ValueError(
f'Unsupported initialization algorithm `{initial_clusters}`,'
f'must be one of `{initialization_algorithms}`.')
distance_metrics = [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
if distance_metric not in distance_metrics:
raise ValueError(f'Unsupported distance metric `{distance_metric}`,'
f'must be one of `{distance_metrics}`.')
self._inputs = inputs if isinstance(inputs, list) else [inputs]
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._seed = random_seed_ops.get_seed(random_seed)[0]
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product which is equivalent to the
cosine distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, axis=1)
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, axis=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
# The cosine distance between normalized vectors x and y is the same as
# 2 * squared_euclidean_distance. We are using this fact and reusing the
# nearest_neighbors op.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, axis=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
(indices,
distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append(
(score, array_ops.squeeze(distances,
[-1]), array_ops.squeeze(indices, [-1])))
return zip(*output)
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _create_variables(self, num_clusters):
"""Creates variables.
Args:
num_clusters: an integer Tensor providing the number of clusters.
Returns:
Tuple with following elements:
- cluster_centers: a Tensor for storing cluster centers
- cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
- cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
- cluster_centers_updated: Tensor representing copy of cluster centers
that are updated every step.
- update_in_steps: numbers of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.placeholder_with_default([], shape=None)
cluster_centers = variable_scope.variable(
init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (
variable_scope.variable(
array_ops.ones([num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
This returns, among other things, an op that chooses initial centers
(init_op), a boolean variable that is set to True when the initial centers
are chosen (cluster_centers_initialized), and an op to perform either an
entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
The caller should use these components as follows. A single worker should
execute init_op multiple times until cluster_centers_initialized becomes
True. Then multiple workers may execute training_op any number of times.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
if (isinstance(self._initial_clusters, str) or
callable(self._initial_clusters)):
initial_clusters = self._initial_clusters
num_clusters = ops.convert_to_tensor(self._num_clusters)
else:
initial_clusters = ops.convert_to_tensor(self._initial_clusters)
num_clusters = array_ops.shape(initial_clusters)[0]
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables(num_clusters)
init_op = _InitializeClustersOpFactory(
self._inputs, num_clusters, initial_clusters, self._distance_metric,
self._seed, self._kmeans_plus_plus_num_retries, self._kmc2_chain_length,
cluster_centers_var, cluster_centers_updated,
cluster_centers_initialized).op()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(inputs, cluster_idx,
cluster_centers_updated,
total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, num_clusters,
cluster_idx,
cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps, ignore_existing=True):
def _f():
# Note that there is a race condition here, so we do a best effort
# updates here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var, ignore_existing=True):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(None, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
# Compute \\(sum_i(d_i)\\), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(cluster_centers,
unique_ids,
cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
num_clusters: an integer Tensor providing the number of clusters.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
An op for doing an update of mini-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
new_clusters_centers = math_ops.add_n(cluster_sums) / (
math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
class _InitializeClustersOpFactory(object):
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
cluster_centers_updated: A second TF variable to hold a copy of the
initial centers, used for full-batch mode. In mini-batch mode,
cluster_centers_updated is the same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set to
true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp, math_ops.cast(self._num_remaining, dtypes.int64), self._seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
the k-MC2 algorithm is used to add *one* new center cluster center. If there
are less than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2."""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0])
return num_remaining
  def _greedy_batch_sampler(self, sampler):
    """Samples centers with `sampler`, or takes the whole batch when small.

    If the current input batch holds no more points than the number of
    centers still required, every point in the batch is chosen as a center
    (this can happen with mini-batch input). Otherwise `sampler` is invoked.

    Args:
      sampler: Zero-argument callable returning a tensor of sampled centers.

    Returns:
      A tensor of new cluster centers.
    """
    return control_flow_ops.cond(self._num_data <= self._num_remaining,
                                 lambda: array_ops.concat(self._inputs, 0),
                                 sampler)
  def _single_batch_sampler(self, sampler):
    """Runs `sampler` after asserting the batch can supply all centers.

    Adds a control dependency asserting that the current batch holds at
    least as many points as centers still to be chosen, so that `sampler`
    has the chance to select all remaining centers from a single batch.

    Args:
      sampler: Zero-argument callable returning a tensor of sampled centers.

    Returns:
      The tensor produced by `sampler`, gated on the assertion.
    """
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
      return sampler()
  def _choose_initial_centers(self):
    """Returns a tensor of centers chosen by the configured initializer.

    Supports the string initializers (random or k-means++), a user-supplied
    callable, or an explicit tensor of initial centers (validated against
    the number of centers still required).
    """
    if isinstance(self._initial_clusters, str):
      if self._initial_clusters == RANDOM_INIT:
        return self._greedy_batch_sampler(self._random)
      else:  # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
        return self._single_batch_sampler(self._kmeans_plus_plus)
    elif callable(self._initial_clusters):
      # User-provided initializer receives the inputs and the count needed.
      return self._initial_clusters(self._inputs, self._num_remaining)
    else:
      # Explicit tensor of centers: its row count must match exactly.
      with ops.control_dependencies([
          check_ops.assert_equal(self._num_remaining,
                                 array_ops.shape(self._initial_clusters)[0])
      ]):
        return self._initial_clusters
  def _add_new_centers(self):
    """Adds some centers and returns the number of centers remaining."""
    new_centers = self._choose_initial_centers()
    if self._distance_metric == COSINE_DISTANCE:
      # Cosine distance assumes unit-normalized centers.
      new_centers = nn_impl.l2_normalize(new_centers, dim=1)
    # If cluster_centers is empty, it doesn't have the right shape for concat.
    all_centers = control_flow_ops.cond(
        math_ops.equal(self._num_selected, 0), lambda: new_centers,
        lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
    # TODO(ccolby): De-dupe all_centers?
    a = state_ops.assign(
        self._cluster_centers, all_centers, validate_shape=False)
    if self._cluster_centers_updated is not self._cluster_centers:
      # Mirror the assignment into the separate "updated" variable when the
      # two are distinct copies.
      a = state_ops.assign(
          self._cluster_centers_updated, a, validate_shape=False)
    # Remaining count = desired clusters minus centers currently assigned.
    return self._num_clusters - array_ops.shape(a)[0]
  def _initialize(self):
    """Runs one initialization step; flips the initialized flag when done.

    Asserts that at least one center is still required, adds centers using
    either k-MC2 or the configured initializer, and sets
    `_cluster_centers_initialized` to True once no centers remain.
    """
    with ops.control_dependencies([
        check_ops.assert_positive(self._num_remaining),
    ]):
      if self._initial_clusters == KMC2_INIT:
        num_now_remaining = self._kmc2_multiple_centers()
      else:
        num_now_remaining = self._add_new_centers()
      return control_flow_ops.cond(
          math_ops.equal(num_now_remaining, 0),
          lambda: state_ops.assign(self._cluster_centers_initialized, True),
          control_flow_ops.no_op)
  def op(self):
    """Returns the cluster initializer op."""
    # Once no centers remain, just assert the initialized flag was set;
    # otherwise run another initialization step.
    return control_flow_ops.cond(
        math_ops.equal(self._num_remaining, 0),
        lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
        self._initialize)
| {
"content_hash": "8a2a9e0bb012585f72e165236b037626",
"timestamp": "",
"source": "github",
"line_count": 763,
"max_line_length": 83,
"avg_line_length": 46.61992136304063,
"alnum_prop": 0.6632369064687527,
"repo_name": "frreiss/tensorflow-fred",
"id": "2b04df177c31a641e38d8729a357f02927666f58",
"size": "36260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/clustering_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from typing import Dict, Optional, Tuple
import torch
def _calculate_bin_centers(boundaries: torch.Tensor):
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0)
return bin_centers
def _calculate_expected_aligned_error(
alignment_confidence_breaks: torch.Tensor,
aligned_distance_error_probs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
return (
torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
bin_centers[-1],
)
def compute_predicted_aligned_error(
    logits: torch.Tensor,
    max_bin: int = 31,
    no_bins: int = 64,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Computes aligned confidence metrics from logits.

    Args:
      logits: [*, num_res, num_res, num_bins] the logits output from
          PredictedAlignedErrorHead.
      max_bin: Maximum bin value
      no_bins: Number of bins
    Returns:
      aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
          aligned error probabilities over bins for each residue pair.
      predicted_aligned_error: [*, num_res, num_res] the expected aligned
          distance error for each pair of residues.
      max_predicted_aligned_error: [*] the maximum predicted error possible.
    """
    breaks = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
    aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)

    # Bin centers, inlined from the helper: midpoints of the breaks plus one
    # extra center past the final break (no_bins centers in total).
    width = breaks[1] - breaks[0]
    centers = breaks + width / 2
    centers = torch.cat([centers, (centers[-1] + width).unsqueeze(-1)], dim=0)

    predicted_aligned_error = (aligned_confidence_probs * centers).sum(dim=-1)
    max_predicted_aligned_error = centers[-1]

    return {
        "aligned_confidence_probs": aligned_confidence_probs,
        "predicted_aligned_error": predicted_aligned_error,
        "max_predicted_aligned_error": max_predicted_aligned_error,
    }
def compute_tm(
    logits: torch.Tensor,
    residue_weights: Optional[torch.Tensor] = None,
    max_bin: int = 31,
    no_bins: int = 64,
    eps: float = 1e-8,
    **kwargs,
) -> torch.Tensor:
    """Computes the predicted TM-score from pairwise aligned-error logits.

    Args:
        logits: [*, num_res, num_res, no_bins] logits from the predicted
            aligned error head.
        residue_weights: [*, num_res] optional per-residue weights; defaults
            to all ones (every residue weighted equally).
        max_bin: Upper edge of the error binning.
        no_bins: Number of error bins.
        eps: Small constant guarding the weight normalization against
            division by zero.

    Returns:
        [*] the predicted TM-score of the best-weighted alignment.
    """
    if residue_weights is None:
        residue_weights = logits.new_ones(logits.shape[-2])

    boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
    bin_centers = _calculate_bin_centers(boundaries)
    # NOTE: the original computed `torch.sum(residue_weights)` here and
    # discarded the result; that dead statement has been removed.

    # d0 is the standard TM-score normalization length; clamping the residue
    # count at 19 keeps (clipped_n - 15) positive for the cube root.
    n = logits.shape[-2]
    clipped_n = max(n, 19)
    d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8

    probs = torch.nn.functional.softmax(logits, dim=-1)
    tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
    # Expected TM contribution for every residue pair.
    predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)

    normed_residue_mask = residue_weights / (eps + residue_weights.sum())
    per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)

    # Pick the alignment anchored at the residue with the highest weighted
    # score (the first one on ties).
    weighted = per_alignment * residue_weights
    argmax = (weighted == torch.max(weighted)).nonzero()[0]
    return per_alignment[tuple(argmax)]
| {
"content_hash": "4743062baf78ec7d4b96a86487751581",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 96,
"avg_line_length": 33.977777777777774,
"alnum_prop": 0.6635055591890124,
"repo_name": "huggingface/transformers",
"id": "4d60b1049137c5ceabfe9f60c6192c2af2895eef",
"size": "3692",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/esm/openfold_utils/loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
"""
Module: Barcode Printer URLS
Project: Django BCP
Copyright: Adlibre Pty Ltd 2012
License: See LICENSE for license information
"""
try:
    # Django < 1.4 keeps the URL helpers in django.conf.urls.defaults.
    from django.conf.urls.defaults import *
except ImportError:
    # Django >= 1.4 moved them. Catching ImportError specifically (instead
    # of the original bare `except:`) avoids masking unrelated errors such
    # as SystemExit or typos inside the imported module.
    from django.conf.urls import patterns, url

# URL routes for the barcode views: render, print, and an embed example.
urlpatterns = patterns('bcp.views',
    url(r'^(?P<barcode_type>(Standard39|Code128))/(?P<code>[\w-]+)$', 'generate', name='bcp-generate'),
    url(r'^(?P<barcode_type>(Standard39|Code128))/(?P<code>[\w-]+)/print$', 'print_barcode', name='bcp-print'),
    url(r'^(?P<barcode_type>(Standard39|Code128))/(?P<code>[\w-]+)/test', 'print_barcode_embed_example', name='bcp-embed-example'),
)
"content_hash": "52e30bb76d03f9fed5afd16778dbc0df",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 131,
"avg_line_length": 34,
"alnum_prop": 0.6795665634674922,
"repo_name": "adlibre/django-bcp",
"id": "d8f987fda296073ce6766e82f499734c1cc39db5",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcp/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2481"
},
{
"name": "Python",
"bytes": "6929"
}
],
"symlink_target": ""
} |
import os
import pickle
_config_database = None
def _get_database_file_name():
home = os.path.expanduser("~")
db_file_name = home + "/.remoter"
return db_file_name
def _load_database():
    """Return the in-memory config database, creating it on first use.

    NOTE(review): reading the pickled database back from disk was commented
    out in the original code, so the database always starts empty for a new
    process; entries persist only within the current run.
    """
    global _config_database
    if _config_database is None:
        _config_database = {}
    return _config_database
def _load_config_entry(name):
    """Return the stored config dict for *name*, or {} if none exists.

    Uses dict.get in place of the original blanket try/except, which would
    have silently swallowed any error — not just the expected missing key.
    """
    db = _load_database()
    return db.get(name, {})
def _write_config_entry(name, obj):
    """Store *obj* under *name* and persist the whole database to disk.

    The pickle stream is binary: opening with 'wb' is required on Python 3
    (the original text-mode 'w' makes pickle.dump raise TypeError there).
    The context manager also guarantees the file is closed on error.
    """
    db = _load_database()
    db[name] = obj
    with open(_get_database_file_name(), 'wb') as fp:
        pickle.dump(db, fp)
class ConfigDB:
    """Dictionary-like view over one named entry of the persistent config DB."""

    def __init__(self, config_db_key):
        # The entry is loaded once and cached; set() persists changes.
        self.__config_db_key = config_db_key
        self.__config_entry = _load_config_entry(self.__config_db_key)

    def get(self, name):
        """Return the value stored under *name* (raises KeyError if absent)."""
        return self.__config_entry[name]

    def set(self, name, obj):
        """Store *obj* under *name* and persist the change immediately."""
        self.__config_entry[name] = obj
        _write_config_entry(self.__config_db_key, self.__config_entry)

    def keys(self):
        """Return the entry's keys as a list (insertion order)."""
        # Idiomatic replacement for the original manual accumulation loop.
        return list(self.__config_entry)

    def values(self):
        """Return the entry's values as a list, aligned with keys()."""
        return [self.__config_entry[k] for k in self.__config_entry]
| {
"content_hash": "af05ef8aacd8d6f42078e4a44fe95980",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 21.45945945945946,
"alnum_prop": 0.5554156171284634,
"repo_name": "nsubtil/remoter",
"id": "fda16e779f15f247c521db6f9e2704397e1815ba",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17310"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from builtins import str
from .base_monitor import BaseMonitor
from kafka import KafkaProducer
from kafka.common import KafkaUnavailableError
from scutils.method_timer import MethodTimer
from retrying import retry
import json
import sys
import traceback
class KafkaBaseMonitor(BaseMonitor):
    '''
    Base monitor for handling outbound Kafka results
    '''

    def setup(self, settings):
        '''
        Setup the handler

        @param settings: The loaded settings file
        '''
        self.producer = self._create_producer(settings)
        self.topic_prefix = settings['KAFKA_TOPIC_PREFIX']
        self.use_appid_topics = settings['KAFKA_APPID_TOPICS']
        self.logger.debug("Successfully connected to Kafka in {name}"
                          .format(name=self.__class__.__name__))

    @retry(wait_exponential_multiplier=500, wait_exponential_max=10000)
    def _create_producer(self, settings):
        """Tries to establish a Kafka consumer connection"""
        # The @retry decorator re-invokes this with exponential backoff
        # (0.5s doubling, capped at 10s) whenever the bare except re-raises.
        try:
            brokers = settings['KAFKA_HOSTS']
            self.logger.debug("Creating new kafka producer using brokers: " +
                              str(brokers))

            # NOTE(review): value_serializer returns a str, not bytes —
            # confirm the installed kafka-python version accepts str values.
            return KafkaProducer(bootstrap_servers=brokers,
                                 value_serializer=lambda m: json.dumps(m),
                                 retries=3,
                                 linger_ms=settings['KAFKA_PRODUCER_BATCH_LINGER_MS'],
                                 buffer_memory=settings['KAFKA_PRODUCER_BUFFER_BYTES'])
        except KeyError as e:
            # Missing configuration: logged but NOT re-raised, so this branch
            # falls through and the method returns None.
            self.logger.error('Missing setting named ' + str(e),
                              {'ex': traceback.format_exc()})
        except:
            # Any other failure (e.g. brokers unavailable) is re-raised so
            # the retry decorator tries again.
            self.logger.error("Couldn't initialize kafka producer in plugin.",
                              {'ex': traceback.format_exc()})
            raise

    def _kafka_success(self, response):
        '''
        Callback for successful send
        '''
        self.logger.debug("Sent message to Kafka")

    def _kafka_failure(self, response):
        '''
        Callback for failed send
        '''
        self.logger.error("Failed to send message to Kafka")

    def _send_to_kafka(self, master):
        '''
        Sends the message back to Kafka

        @param master: the final dict to send
        @returns: True if successfully sent to kafka
        '''
        appid_topic = "{prefix}.outbound_{appid}".format(
            prefix=self.topic_prefix,
            appid=master['appid'])
        firehose_topic = "{prefix}.outbound_firehose".format(
            prefix=self.topic_prefix)
        try:
            # dont want logger in outbound kafka message
            if self.use_appid_topics:
                # Optional per-application topic in addition to the firehose.
                f1 = self.producer.send(appid_topic, master)
                f1.add_callback(self._kafka_success)
                f1.add_errback(self._kafka_failure)
            # The firehose topic always receives the message.
            f2 = self.producer.send(firehose_topic, master)
            f2.add_callback(self._kafka_success)
            f2.add_errback(self._kafka_failure)

            return True
        except Exception as ex:
            message = "An exception '{0}' occured while sending a message " \
                "to kafka. Arguments:\n{1!r}" \
                .format(type(ex).__name__, ex.args)
            self.logger.error(message)
            return False

    def close(self):
        # Flush any buffered messages before shutting the producer down.
        self.producer.flush()
        self.producer.close(timeout=10)
| {
"content_hash": "573273ecd31ba6b387139902fe7011dd",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 87,
"avg_line_length": 36.58163265306123,
"alnum_prop": 0.5564853556485355,
"repo_name": "istresearch/scrapy-cluster",
"id": "fa13ae72105b5d4d5545e0c72ff990b22fe229eb",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redis-monitor/plugins/kafka_base_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "46003"
},
{
"name": "Python",
"bytes": "450091"
},
{
"name": "Shell",
"bytes": "11968"
}
],
"symlink_target": ""
} |
import os
import os.path
import json
import shutil
from shutil import ignore_patterns
import collections
import errno
import sys
import codecs
import subprocess
def jwrite(src, key, value):
    """Set *key* to *value* in the JSON file at *src*.

    If the file exists, it is updated in place with existing key order
    preserved; if it cannot be opened for update (e.g. it does not exist
    yet), a new file containing only {key: value} is created.

    The original implementation leaked the open file handle whenever
    json.load raised; context managers guarantee the handle is closed.
    """
    try:
        jsonFile = open(src, 'r+')
    except IOError:
        # No existing file: create one containing just this entry.
        with open(src, 'w') as out:
            json.dump({key: value}, out, indent=4)
        return
    # Update in place: rewrite from the start, then truncate any leftover
    # tail in case the new content is shorter than the old.
    with jsonFile:
        jsonData = json.load(jsonFile, object_pairs_hook=collections.OrderedDict)
        jsonData[key] = value
        jsonFile.seek(0)
        json.dump(jsonData, jsonFile, indent=4)
        jsonFile.truncate()
def jwriteAdv(src, key, value, pos="", key2=""):
    """Set a (possibly nested) value in the JSON file at *src*.

    With neither *pos* nor *key2*, defers to jwrite(). With *pos*, writes
    data[key][pos] = value; with *key2* as well, data[key][pos][key2] = value.

    Fixes over the original:
      * string comparisons used ``is`` (identity) instead of ``==``, which
        is unreliable for non-interned strings;
      * when creating a new file, the freshly built nested entry was
        immediately clobbered by ``jsonData = {key: value}``, discarding
        *pos*/*key2* entirely — that line is removed.
    """
    if pos == "" and key2 == "":
        jwrite(src, key, value)
        return
    try:
        jsonFile = open(src, 'r+')
    except IOError:
        # File does not exist yet: create it with just the nested entry.
        # (Shape for the key2 branch kept as in the original: {key: {key2: value}}.)
        with open(src, 'w') as out:
            if key2 == "":
                jsonData = {key: {pos: value}}
            else:
                jsonData = {key: {key2: value}}
            json.dump(jsonData, out, indent=4)
        return
    # Existing file: load, update the nested slot, rewrite and truncate.
    with jsonFile:
        jsonData = json.load(jsonFile, object_pairs_hook=collections.OrderedDict)
        if key2 == "":
            jsonData[key][pos] = value
        else:
            jsonData[key][pos][key2] = value
        jsonFile.seek(0)
        json.dump(jsonData, jsonFile, indent=4)
        jsonFile.truncate()
# the root dir is two os.path.split() calls above this file
rootDirProject = os.path.realpath(__file__)
for i in range(2):
    rootDirProject = os.path.split(rootDirProject)[0]
sElectDir = rootDirProject + "/sElect"
electionConfig = rootDirProject + "/_configFiles_/handlerConfigFile.json"

# get elections (key order preserved so indices match the config file)
jsonFile = open(electionConfig, 'r')
jsonData = json.load(jsonFile, object_pairs_hook=collections.OrderedDict)
elecs = jsonData["elections"]
jsonFile.close()

# resume elections which haven't been removed and update PIDs
for x in range(len(elecs)):
    if x == 0:
        print("Resuming elections...")
    electionID = elecs[x]["electionID"]
    startingTime = elecs[x]["startTime"]
    numMix = elecs[x]["mixServers"]
    # Build the election folder name from the start timestamp, e.g.
    # "<yyyymmdd><hhmmss>_<electionID>_sElect".
    tStamp = startingTime.replace("-", "").replace(":", "").split()
    dstroot = os.path.join(rootDirProject, "elections/" + tStamp[0]+tStamp[1] + "_" + electionID + "_" + os.path.split(sElectDir)[1])
    # restart all node servers; a present result file means the server only
    # needs to serve the already-computed result instead of resuming work.
    if os.path.exists(dstroot+"/CollectingServer/_data_/partialResult.msg"):
        col = subprocess.Popen(["node", "collectingServer.js", "--serveResult"], cwd=(dstroot+"/CollectingServer"))
    else:
        col = subprocess.Popen(["node", "collectingServer.js", "--resume"], cwd=(dstroot+"/CollectingServer"))
    mix = []
    for z in range(numMix):
        # Mix server directories are zero-padded to two digits ("00", "01"...).
        numMixStr = str(z)
        if z < 10:
            numMixStr = "0"+str(z)
        if os.path.exists(dstroot+"/mix/"+numMixStr+"/_data_/ballots"+numMixStr+"_output.msg"):
            mix.append(subprocess.Popen(["node", "mixServer.js", "--serveResult"], cwd=(dstroot+"/mix/"+numMixStr)))
        else:
            mix.append(subprocess.Popen(["node", "mixServer.js"], cwd=(dstroot+"/mix/"+numMixStr)))
    if os.path.exists(dstroot+"/BulletinBoard/_data_/resultMIX"+str(numMix-1)+".msg"):
        bb = subprocess.Popen(["node", "bb.js", "--serveResult"], cwd=(dstroot+"/BulletinBoard"))
    else:
        bb = subprocess.Popen(["node", "bb.js"], cwd=(dstroot+"/BulletinBoard"))
    # Record the freshly spawned process IDs back into the config file so the
    # handler can manage (e.g. stop) the servers later.
    newPIDs = {"cs": col.pid, "bb": bb.pid}
    for k in range(numMix):
        newPIDs["m"+str(k)] = mix[k].pid
    #print(str(x+1) + ". started " + str(electionID))
    jwriteAdv(electionConfig, "elections", newPIDs, x, "processIDs")
print("...done.")
"content_hash": "fad70a4743b41e08f939917807b0aabf",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 133,
"avg_line_length": 35.504950495049506,
"alnum_prop": 0.6048522030117122,
"repo_name": "escapin/ElectionManager",
"id": "64917cf5d248cef322142f582fa6604ae58c35c1",
"size": "3586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/resumeElection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "65239"
},
{
"name": "HTML",
"bytes": "17136"
},
{
"name": "JavaScript",
"bytes": "595387"
},
{
"name": "Makefile",
"bytes": "3276"
},
{
"name": "Python",
"bytes": "74572"
},
{
"name": "Shell",
"bytes": "3463"
}
],
"symlink_target": ""
} |
import os
from datetime import timedelta

# starterbot's ID and API token, read from the environment.
# Defaulting BOT_ID to "" keeps module import from crashing: the original
# `os.environ.get("BOT_ID")` returned None when the variable was unset, and
# the "<@" + None + ">" concatenation below raised TypeError at import time.
BOT_ID = os.environ.get("BOT_ID", "")
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')

# constants
AT_BOT = "<@" + BOT_ID + ">"  # mention prefix used to address the bot
BOT_NAME = 'antibot'
DEFAULT_DELETE_DELAY = 20.0  # seconds (original note: "Default is 180 seconds")
DB_NAME = "counter.db"

# Skirmish Constants
CHECKIN_FREQ_DAYS = 7  # Number of days between check-ins
RELAPSE_POINTS = -5
PER_DAY_POINTS = 1
BUFFER_DAYS_TEAM = timedelta(days=7)
SKIRMISH_NOADMIN_FUNCTIONS = ["players", "details"]
"content_hash": "f64a6a36dd8aac64c8466a12dda18037",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 56,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.7181467181467182,
"repo_name": "TheAntimist/antibot",
"id": "dffa588e7deab3d02b0e2b35d22250cfaef4a843",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39778"
}
],
"symlink_target": ""
} |
import rollbar
import uvicorn
from rollbar.contrib.starlette import ReporterMiddleware as RollbarMiddleware
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse

# Initialize Rollbar SDK with your server-side ACCESS_TOKEN
rollbar.init(
    'ACCESS_TOKEN',
    environment='staging',
    handler='async',  # For asynchronous reporting use: default, async or httpx
)

# Integrate Rollbar with Starlette application
app = Starlette()
app.add_middleware(RollbarMiddleware)  # should be added as the first middleware


# Verify application runs correctly
#
# $ curl http://localhost:8888
@app.route('/')
async def root(request):
    return PlainTextResponse('hello world')


# Cause an uncaught exception to be sent to Rollbar
# GET query params will be sent to Rollbar and available in the UI
#
# $ curl http://localhost:8888/error?param1=hello&param2=world
async def localfunc(arg1, arg2, arg3):
    # Both local variables and function arguments will be sent to Rollbar
    # and available in the UI
    localvar = 'local variable'
    # Intentional NameError: this undefined name is exactly what triggers the
    # uncaught exception the example reports to Rollbar — do not "fix" it.
    cause_error_with_local_variables


@app.route('/error')
async def error(request):
    await localfunc('func_arg1', 'func_arg2', 1)
    return PlainTextResponse("You shouldn't be seeing this")


if __name__ == '__main__':
    uvicorn.run(app, host='localhost', port=8888)
| {
"content_hash": "05e1605a61123af7087d3e860565c963",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 29.282608695652176,
"alnum_prop": 0.746844840386043,
"repo_name": "rollbar/pyrollbar",
"id": "24089263fbd096022185569502195b81f2cc1c00",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rollbar/examples/starlette/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nix",
"bytes": "1004"
},
{
"name": "Python",
"bytes": "383008"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
from six.moves import filter, zip
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
import numpy as np
import six
import collections
from numbers import Number
import numbers
from functools import partial
from pymatgen.core.physical_constants import N_a, e
import re
"""
Some conversion factors
"""
Ha_to_eV = 27.21138386
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = 1.660538921e-27
mile_to_meters = 1609.347219
bohr_to_angstrom = 0.5291772083
bohr_to_ang = bohr_to_angstrom
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
},
"current": {
"A": 1
},
"temperature": {
"K": 1,
},
"amount": {
"mol": 1,
"atom": 1 / N_a
},
"intensity": {
"cd": 1
},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024**2,
"Gb": 1024**3,
"Tb": 1024**4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v for k, v in BASE_UNITS["memory"].items()})
#This current list are supported derived units defined in terms of powers of
#SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, e: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1}
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2}
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3}
},
"emf": {
"V": {"m": 2, "kg": 1, "s": -3, "A": -1}
},
"capacitance": {
"F": {"m": -2, "kg": -1, "s": 4, "A": 2}
},
"resistance": {
"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
},
"conductance": {
"S": {"m": -2, "kg": -1, "s": 3, "A": 2}
},
"magnetic_flux": {
"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
}
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {}
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
    """Return (SI unit name, conversion factor) for *unit*'s unit type.

    The SI unit of a type is, by convention, the one whose factor is 1 in
    BASE_UNITS; the returned factor converts *unit* into that SI unit.
    """
    utype = _UNAME2UTYPE[unit]
    scale = BASE_UNITS[utype]
    si_name = next(k for k in scale.keys() if scale[k] == 1)
    return si_name, scale[unit]
class UnitError(BaseException):
    """
    Exception class for unit errors.

    NOTE(review): inherits from BaseException rather than Exception, so a
    plain ``except Exception`` will NOT catch it — verify this is deliberate
    before changing callers.
    """
def check_mappings(u):
    """Collapse *u* to a single derived unit if it matches one exactly.

    Scans DERIVED_UNITS for a definition whose powers coincide with *u* in
    both directions (treating absent keys as power 0); on a match, returns
    {derived_name: 1}, otherwise returns *u* unchanged.
    """
    for derived in DERIVED_UNITS.values():
        for name, definition in derived.items():
            same_forward = all(definition.get(k, 0) == p for k, p in u.items())
            same_backward = all(u.get(k, 0) == p for k, p in definition.items())
            if same_forward and same_backward:
                return {name: 1}
    return u
class Unit(collections.Mapping):
    """
    Represents a unit, e.g., "m" for meters, etc. Supports compound units.
    Only integer powers are supported for units.

    NOTE(review): subclasses ``collections.Mapping``, which was removed in
    Python 3.10 (moved to ``collections.abc``); acceptable for the py2/py3
    six-era this module targets, but verify before upgrading.
    """
    Error = UnitError

    def __init__(self, unit_def):
        """
        Constructs a unit.

        Args:
            unit_def: A definition for the unit. Either a mapping of unit to
                powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
                or simply as a string "kg m^2 s^-1". Note that the supported
                format uses "^" as the power operator and all units must be
                space-separated.
        """
        if isinstance(unit_def, six.string_types):
            unit = collections.defaultdict(int)
            # Parse tokens like "kg", "m^2", "s^-1"; a missing power means 1.
            for m in re.finditer("([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
                p = m.group(2)
                p = 1 if not p else int(p)
                k = m.group(1)
                unit[k] += p
        else:
            # Mapping form: drop zero powers.
            unit = {k: v for k, v in dict(unit_def).items() if v != 0}
        # Collapse to a single derived unit (e.g. kg m^2 s^-2 -> J) if possible.
        self._unit = check_mappings(unit)

    def __mul__(self, other):
        # Product of units: powers add.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] += v
        return Unit(new_units)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        # Quotient of units (Python 2 operator): powers subtract.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] -= v
        return Unit(new_units)

    def __truediv__(self, other):
        return self.__div__(other)

    def __pow__(self, i):
        # Unit raised to a power: every exponent is scaled by i.
        return Unit({k: v * i for k, v in self.items()})

    # Mapping protocol: a Unit reads as a {unit name: power} mapping.
    def __iter__(self):
        return self._unit.__iter__()

    def __getitem__(self, i):
        return self._unit[i]

    def __len__(self):
        return len(self._unit)

    def __repr__(self):
        # Highest powers first; "^1" is omitted and zero powers are dropped.
        sorted_keys = sorted(self._unit.keys(),
                             key=lambda k: (-self._unit[k], k))
        return " ".join(["{}^{}".format(k, self._unit[k])
                         if self._unit[k] != 1 else k
                         for k in sorted_keys if self._unit[k] != 0])

    def __str__(self):
        return self.__repr__()

    @property
    def as_base_units(self):
        """
        Converts all units to base SI units, including derived units.

        Returns:
            (base_units_dict, scaling factor). base_units_dict will not
            contain any constants, which are gathered in the scaling factor.
        """
        b = collections.defaultdict(int)
        factor = 1
        for k, v in self.items():
            derived = False
            for d in DERIVED_UNITS.values():
                if k in d:
                    # Expand the derived unit: numeric keys are constants
                    # folded into the scaling factor.
                    for k2, v2 in d[k].items():
                        if isinstance(k2, Number):
                            factor *= k2 ** (v2 * v)
                        else:
                            b[k2] += v2 * v
                    derived = True
                    break
            if not derived:
                # Plain base unit: map to its SI unit and accumulate factor.
                si, f = _get_si_unit(k)
                b[si] += v
                factor *= f ** v
        return {k: v for k, v in b.items() if v != 0}, factor

    def get_conversion_factor(self, new_unit):
        """
        Returns a conversion factor between this unit and a new unit.
        Compound units are supported, but must have the same powers in each
        unit type.

        Args:
            new_unit: The new unit.

        Raises:
            UnitError: if the two units decompose to incompatible base units.
        """
        uo_base, ofactor = self.as_base_units
        un_base, nfactor = Unit(new_unit).as_base_units
        # Align the two base-unit decompositions by unit type before
        # comparing powers pairwise.
        units_new = sorted(un_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        units_old = sorted(uo_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        factor = ofactor / nfactor
        for uo, un in zip(units_old, units_new):
            if uo[1] != un[1]:
                raise UnitError("Units %s and %s are not compatible!" % (uo, un))
            c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
            factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
        return factor
class FloatWithUnit(float):
"""
Subclasses float to attach a unit type. Typically, you should use the
pre-defined unit type subclasses such as Energy, Length, etc. instead of
using FloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity). Note that FloatWithUnit does not override the eq
method for float, i.e., units are not checked when testing for equality.
The reason is to allow this class to be used transparently wherever floats
are expected.
>>> e = Energy(1.1, "Ha")
>>> a = Energy(1.1, "Ha")
>>> b = Energy(3, "eV")
>>> c = a + b
>>> print(c)
1.2102479761938871 Ha
>>> c.to("eV")
32.932522246000005 eV
"""
Error = UnitError
@classmethod
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
"""
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
"""
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError(
"{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super(FloatWithUnit, self).__repr__()
def __str__(self):
s = super(FloatWithUnit, self).__str__()
return "{} {}".format(s, self._unit)
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None,
unit=self._unit ** i)
def __div__(self, other):
val = super(FloatWithUnit, self).__div__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __truediv__(self, other):
val = super(FloatWithUnit, self).__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super(FloatWithUnit, self).__neg__(),
unit_type=self._unit_type,
unit=self._unit)
def __getnewargs__(self):
"""Function used by pickle to recreate object."""
#print(self.__dict__)
# FIXME
# There's a problem with _unit_type if we try to unpickle objects from file.
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular I have problems with "amu"
# likely due to weight in core.composition
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
#print("in getstate %s" % state)
return state
def __setstate__(self, state):
#print("in setstate %s" % state)
self._unit = state["_unit"]
    @property
    def unit_type(self):
        """The category of unit ("energy", "length", ...) or None."""
        return self._unit_type
    @property
    def unit(self):
        """The Unit object attached to this value."""
        return self._unit
def to(self, new_unit):
"""
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
units of each type.
Args:
new_unit: New unit type.
Returns:
A FloatWithUnit object in the new units.
Example usage:
>>> e = Energy(1.1, "eV")
>>> e = Energy(1.1, "Ha")
>>> e.to("eV")
29.932522246 eV
"""
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit)
    @property
    def supported_units(self):
        """
        Tuple of unit names supported for this value's unit type.
        """
        return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
    """
    Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
    use the pre-defined unit type subclasses such as EnergyArray,
    LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.
    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity).
    >>> a = EnergyArray([1, 2], "Ha")
    >>> b = EnergyArray([1, 2], "eV")
    >>> c = a + b
    >>> print(c)
    [ 1.03674933 2.07349865] Ha
    >>> c.to("eV")
    array([ 28.21138386, 56.42276772]) eV
    """
    # Exception type raised on unit mismatches, exposed for callers.
    Error = UnitError
    def __new__(cls, input_array, unit, unit_type=None):
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        obj = np.asarray(input_array).view(cls)
        # add the new attributes to the created instance
        obj._unit = Unit(unit)
        obj._unit_type = unit_type
        return obj
    def __array_finalize__(self, obj):
        """
        See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
        comments.
        """
        # Runs for every construction path (view casting, slicing, ufunc
        # output). obj is None only for explicit __new__ construction,
        # which has already attached the attributes.
        if obj is None:
            return
        self._unit = getattr(obj, "_unit", None)
        self._unit_type = getattr(obj, "_unit_type", None)
    #TODO abstract base class property?
    @property
    def unit_type(self):
        # The unit category ("energy", "length", ...) or None.
        return self._unit_type
    #TODO abstract base class property?
    @property
    def unit(self):
        # The attached Unit object.
        return self._unit
    def __reduce__(self):
        # Pickle support: wrap the ndarray state dict so _unit survives a
        # round-trip (unpacked again in __setstate__).
        #print("in reduce")
        reduce = list(super(ArrayWithUnit, self).__reduce__())
        #print("unit",self._unit)
        #print(reduce[2])
        reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
        return tuple(reduce)
    def __setstate__(self, state):
        # NOTE(review): _unit_type is not stored by __reduce__, so it is
        # None after unpickling -- confirm whether that is acceptable.
        #print("in setstate %s" % str(state))
        super(ArrayWithUnit, self).__setstate__(state["np_state"])
        self._unit = state["_unit"]
    def __repr__(self):
        # Plain ndarray repr with the unit symbol appended.
        return "{} {}".format(np.array(self).__repr__(), self.unit)
    def __str__(self):
        return "{} {}".format(np.array(self).__str__(), self.unit)
    def __add__(self, other):
        # Addition requires matching unit types; the right operand is
        # converted so the result carries the left operand's unit.
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Adding different types of units is"
                                " not allowed")
            if other.unit != self.unit:
                other = other.to(self.unit)
        return self.__class__(np.array(self) + np.array(other),
                              unit_type=self.unit_type, unit=self.unit)
    def __sub__(self, other):
        # Same conversion rules as __add__.
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Subtracting different units is not allowed")
            if other.unit != self.unit:
                other = other.to(self.unit)
        return self.__class__(np.array(self) - np.array(other),
                              unit_type=self.unit_type, unit=self.unit)
    def __mul__(self, other):
        # FIXME
        # Here we have the most important difference between FloatWithUnit and
        # ArrayWithFloatWithUnit:
        # If other does not have units, I return an object with the same units
        # as self.
        # if other *has* units, I return an object *without* units since
        # taking into account all the possible derived quantities would be
        # too difficult.
        # Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
        # bit misleading.
        # Same protocol for __div__
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__mul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            # Cannot use super since it returns an instance of self.__class__
            # while here we want a bare numpy array.
            return self.__class__(
                np.array(self).__mul__(np.array(other)),
                unit=self.unit * other.unit)
    def __rmul__(self, other):
        # Reflected multiplication; unit handling mirrors __mul__.
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__rmul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__rmul__(np.array(other)),
                unit=self.unit * other.unit)
    def __div__(self, other):
        # Python 2 classic division; unit handling mirrors __mul__.
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__div__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__div__(np.array(other)),
                unit=self.unit/other.unit)
    def __truediv__(self, other):
        # True division; unit handling mirrors __mul__.
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__truediv__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__truediv__(np.array(other)),
                unit=self.unit / other.unit)
    def __neg__(self):
        # Negation keeps both the unit and the unit type.
        return self.__class__(np.array(self).__neg__(),
                              unit_type=self.unit_type, unit=self.unit)
    def to(self, new_unit):
        """
        Conversion to a new_unit.
        Args:
            new_unit:
                New unit type.
        Returns:
            A ArrayWithFloatWithUnit object in the new units.
        Example usage:
        >>> e = EnergyArray([1, 1.1], "Ha")
        >>> e.to("eV")
        array([ 27.21138386, 29.93252225]) eV
        """
        return self.__class__(
            np.array(self) * self.unit.get_conversion_factor(new_unit),
            unit_type=self.unit_type, unit=new_unit)
    #TODO abstract base class property?
    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        # NOTE(review): unlike FloatWithUnit.supported_units, this returns
        # the unit dict itself rather than a tuple of names -- confirm
        # whether the inconsistency is intentional.
        return ALL_UNITS[self.unit_type]
    #TODO abstract base class method?
    def conversions(self):
        """
        Returns a string showing the available conversions.
        Useful tool in interactive mode.
        """
        return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
    """
    Partial returns a partial object and therefore we cannot inherit class
    methods defined in FloatWithUnit. This function calls partial and
    patches the new class before returning.
    """
    patched = partial(func, *args, **kwargs)
    # monkey patch: expose the alternate constructor on the partial object
    patched.from_string = FloatWithUnit.from_string
    return patched
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
    """
    Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
    objects with units if obj is a dict, else an instance of
    `ArrayWithFloatWithUnit`.
    Args:
        unit: Specific units (eV, Ha, m, ang, etc.).
    """
    unit_type = _UNAME2UTYPE[unit]
    if isinstance(obj, numbers.Number):
        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
    if isinstance(obj, collections.Mapping):
        # Recurse so nested values each get their own unit wrapper.
        return dict((key, obj_with_unit(value, unit))
                    for key, value in obj.items())
    return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
    """
    Useful decorator to assign units to the output of a function. For
    sequences, all values in the sequences are assigned the same unit. It
    works with Python sequences only. The creation of numpy arrays loses all
    unit information. For mapping types, the values are assigned units.
    Args:
        unit: Specific unit (eV, Ha, m, ang, etc.).
    Example usage::
        @unitized(unit="kg")
        def get_mass():
            return 123.45
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            result = f(*args, **kwargs)
            # Resolved at call time so late additions to the registry work.
            unit_type = _UNAME2UTYPE[unit]
            if isinstance(result, collections.Sequence):
                # Rebuild with the original sequence class (list or tuple)
                # rather than returning an ArrayWithUnit, so the caller's
                # container type is preserved.
                wrapped = [FloatWithUnit(item, unit_type=unit_type, unit=unit)
                           for item in result]
                return result.__class__(wrapped)
            if isinstance(result, collections.Mapping):
                # Assign units to the values in place.
                for key in result:
                    result[key] = FloatWithUnit(result[key],
                                                unit_type=unit_type,
                                                unit=unit)
                return result
            if isinstance(result, numbers.Number):
                return FloatWithUnit(result, unit_type=unit_type, unit=unit)
            if result is None:
                return result
            raise TypeError("Don't know how to assign units to %s" % str(result))
        return wrapped_f
    return wrap
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above (e.g. the
    # conversion examples in FloatWithUnit.to) when executed directly.
    import doctest
    doctest.testmod()
| {
"content_hash": "9c9d46cdaaad92aecae44de022d56adb",
"timestamp": "",
"source": "github",
"line_count": 827,
"max_line_length": 109,
"avg_line_length": 31.808948004836758,
"alnum_prop": 0.5395347069109708,
"repo_name": "yanikou19/pymatgen",
"id": "7a606adb62300c976b290bed1a8aa8473fd88ed2",
"size": "26323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/core/units.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7429"
},
{
"name": "JavaScript",
"bytes": "3638"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3368797"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
} |
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
import numpy as np
import struct
import decorator_of_function
from class_painter_by_ndarray import *
################################### PART2 CLASS && FUNCTION ###########################
class CreateNetwork(object):
    """Minimal CNN building blocks: MNIST loading, activation functions,
    a sliding-window convolution and non-overlapping pooling.

    Written for Python 2 (uses xrange). All heavy lifting is done with
    numpy; methods log their progress via the project's logging decorator.
    """
    Decorator = decorator_of_function.CreateDecorator()
    @Decorator.log_of_function
    def __init__(self):
        """Start the wall-clock timer and configure file + console logging."""
        self.start = time.clock()
        logging.basicConfig(level = logging.INFO,
                            format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
                            datefmt = '%y-%m-%d %H:%M:%S',
                            filename = './my-first-cnn.log',
                            filemode = 'a')
        # Mirror log records to the console in addition to the log file.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        logging.info("START CLASS {class_name}.".format(class_name = CreateNetwork.__name__))
    @Decorator.log_of_function
    def __del__(self):
        """Log the elapsed lifetime of the instance."""
        logging.info("END CLASS {class_name}.".format(class_name = CreateNetwork.__name__))
        self.end = time.clock()
        # BUG FIX: previously the absolute clock value (self.end) was
        # reported as the run time; the elapsed time is end - start.
        logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateNetwork.__name__, delta_time = self.end - self.start))
    @Decorator.log_of_function
    def load_image_data_set(self, img_data_dir):
        """Read an MNIST idx3-ubyte image file.

        Returns the pixel data reshaped to (img_num, img_width, img_height).
        """
        logging.info("Load image data set from {0}.".format(img_data_dir))
        with open(img_data_dir, "rb") as binary_file_handle:
            image_data_buffer = binary_file_handle.read()
        # '>IIII' reads four big-endian unsigned int32 values:
        # magic number, image count, image width, image height.
        # unpack_from(...)
        # Unpack the buffer, containing packed C structure data, according to
        # fmt, starting at offset. Requires len(buffer[offset:]) >= calcsize(fmt).
        head = struct.unpack_from('>IIII' , image_data_buffer ,0)
        logging.info("head:{0}".format(head))
        # NOTE: "magic_num" is really the header size in bytes (16), i.e.
        # the offset of the pixel data; name kept for log compatibility.
        magic_num = struct.calcsize('>IIII')
        img_num = head[1]
        img_width = head[2]
        img_height = head[3]
        logging.info("magic_num:{0}".format(magic_num))
        logging.info("img_num:{0}".format(img_num))
        logging.info("img_width:{0}".format(img_width))
        logging.info("img_height:{0}".format(img_height))
        # [60000]*28*28 unsigned bytes of pixel data follow the header.
        all_img_bit = img_num * img_width * img_height
        all_img_bit_string = '>' + str(all_img_bit) + 'B' #like '>47040000B'
        logging.info("all_img_bit_string:{0}".format(all_img_bit_string))
        all_image_2d_ndarray = struct.unpack_from(all_img_bit_string, image_data_buffer, magic_num)
        all_image_2d_ndarray = np.reshape(all_image_2d_ndarray, [img_num, img_width, img_height])
        return all_image_2d_ndarray
    @Decorator.log_of_function
    def sigmoid_activation_function(self, input_matrix):
        """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
        return 1.0 / (1.0 + np.exp(-input_matrix))
    @Decorator.log_of_function
    def tanh_activation_function(self, input_matrix):
        """Element-wise hyperbolic tangent computed from exponentials."""
        pos_exp = np.exp(input_matrix)
        neg_exp = np.exp(-input_matrix)
        return (pos_exp - neg_exp) / (pos_exp + neg_exp)
    @Decorator.log_of_function
    def relu_activation_function(self, input_matrix):
        """Element-wise ReLU: max(0, x), via the leaky variant with
        coefficient 1."""
        return self.leaky_relu_activation_function(input_matrix = input_matrix,
                                                   leaky_coefficient = 1)
    @Decorator.log_of_function
    def leaky_relu_activation_function(self, input_matrix, leaky_coefficient):
        """Element-wise rectifier.

        Negative inputs map to 0; positive inputs map to x when the
        coefficient is 1, otherwise to x / coefficient.

        NOTE(review): a textbook leaky ReLU scales *negative* inputs
        instead of zeroing them -- behavior kept as-is, confirm intent.
        """
        def relu(x, leak_coefficient):
            # BUG FIX: this inner function previously read the enclosing
            # 'leaky_coefficient' instead of its own parameter; it now uses
            # the parameter consistently (same values, no behavior change).
            if leak_coefficient == 1:
                return 0 if x <= 0 else x
            else:
                return 0 if x <= 0 else float(x) / leak_coefficient
        vectorized_relu = np.vectorize(relu)
        return vectorized_relu(input_matrix, leak_coefficient = leaky_coefficient)
    @Decorator.log_of_function
    def max_out_activation_function(self, input_matrix):
        """Placeholder for the maxout activation (not implemented)."""
        pass
    @Decorator.log_of_function
    def softplus_activation_function(self, input_matrix):
        """Element-wise softplus: ln(1 + exp(x)).

        BUG FIX: previously computed ln(1 + e*x) because the input was
        multiplied by np.e (via .dot) instead of exponentiated.
        """
        return np.log(1.0 + np.exp(input_matrix))
    @Decorator.log_of_function
    def convolution(self, input_matrix, conv_operator_array = None):
        """Slide a kernel over input_matrix (stride 1, no padding) and
        return the resulting (H-kh+1, W-kw+1) matrix.

        If the kernel does not fit inside the input, the input is returned
        unchanged (with an error log).

        NOTE(review): each window is combined with the kernel via matrix
        product then summed (window.dot(kernel).sum()), which differs from
        textbook cross-correlation (elementwise multiply then sum) --
        behavior kept as-is, confirm intent.
        """
        if conv_operator_array is None:
            # Built per call to avoid the shared mutable-default-argument
            # pitfall (previously an np.array default in the signature).
            conv_operator_array = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
        conv_operator_shape_tuple = conv_operator_array.shape
        conv_operator_height = conv_operator_shape_tuple[0]
        conv_operator_width = conv_operator_shape_tuple[1]
        input_matrix_shape_tuple = input_matrix.shape
        input_matrix_height = input_matrix_shape_tuple[0]
        input_matrix_width = input_matrix_shape_tuple[1]
        if input_matrix_height <= conv_operator_height or input_matrix_width <= conv_operator_width:
            logging.error("The input matrix({0}) can't execute convolution({1}) progress.".format(input_matrix_shape_tuple, conv_operator_shape_tuple))
            return input_matrix
        # Output size of a 'valid' (no padding, stride 1) convolution.
        new_conv_matrix_height = input_matrix_height - conv_operator_height + 1
        new_conv_matrix_width = input_matrix_width - conv_operator_width + 1
        def calculate_start_conv_coordinate_in_origin_matrix(new_conv_matrix_height, new_conv_matrix_width):
            # Enumerate the top-left corner of every kernel placement.
            start_conv_coordinate_tuple_list = []
            for start_height in xrange(new_conv_matrix_height):
                start_conv_coordinate_tuple_list.extend(
                    [(start_height, start_width)
                     for start_width in xrange(new_conv_matrix_width)])
            return start_conv_coordinate_tuple_list
        start_conv_coordinate_tuple_list_in_origin_matrix = calculate_start_conv_coordinate_in_origin_matrix(new_conv_matrix_height = new_conv_matrix_height,
                                                                                                             new_conv_matrix_width = new_conv_matrix_width)
        logging.info("start_conv_coordinate_tuple_list_in_origin_matrix:{0}".format(start_conv_coordinate_tuple_list_in_origin_matrix))
        logging.info("len(start_conv_coordinate_tuple_list_in_origin_matrix):{0}".format(len(start_conv_coordinate_tuple_list_in_origin_matrix)))
        def calculate_conv_matrix_according_to_start_conv_coordinate(start_conv_coordinate_tuple_list_in_origin_matrix, input_matrix, conv_operator_array):
            # Evaluate every kernel placement, then reshape the flat list
            # of results into the output matrix.
            input_matrix = np.mat(input_matrix)
            conv_operator_matrix = np.mat(conv_operator_array)
            conv_operator_height, conv_operator_width = conv_operator_matrix.shape
            conv_input_list = list()
            for start_coord_height, start_coord_width in start_conv_coordinate_tuple_list_in_origin_matrix:
                end_coord_height = start_coord_height + conv_operator_height
                end_coord_width = start_coord_width + conv_operator_width
                conv_value = input_matrix[start_coord_height:end_coord_height, start_coord_width:end_coord_width]\
                             .dot(conv_operator_matrix)\
                             .sum()
                conv_input_list.append(conv_value)
            return np.mat(conv_input_list).reshape(new_conv_matrix_height,
                                                   new_conv_matrix_width)
        conv_input_matrix = calculate_conv_matrix_according_to_start_conv_coordinate(
            start_conv_coordinate_tuple_list_in_origin_matrix = start_conv_coordinate_tuple_list_in_origin_matrix,
            input_matrix = input_matrix,
            conv_operator_array = conv_operator_array)
        return conv_input_matrix
    @Decorator.log_of_function
    def general_pooling(self, input_matrix, pooling_mode = "max", pooling_operator_shape_tuple = (2, 2)):
        """Non-overlapping pooling with the given window shape.

        Args:
            input_matrix: 2-D array-like input.
            pooling_mode: "max" or "mean".
            pooling_operator_shape_tuple: (height, width) of the window.

        Returns:
            The pooled matrix, or the input unchanged (with an error log)
            when the window does not fit inside the input.

        Raises:
            ValueError: for an unknown pooling_mode (previously this
            surfaced as an UnboundLocalError deep inside a helper).
        """
        input_matrix = np.mat(input_matrix)
        if input_matrix.shape[0] <= pooling_operator_shape_tuple[0] or input_matrix.shape[1] <= pooling_operator_shape_tuple[1]:
            logging.error("The scale of pooling operator is bigger than input matrix.")
            return input_matrix
        if pooling_mode not in ("max", "mean"):
            raise ValueError("Unsupported pooling_mode: {0}".format(pooling_mode))
        # Floor division: trailing rows/columns that do not fill a whole
        # window are dropped. BUG FIX: window start coordinates are now
        # generated only for complete windows, so inputs whose size is not
        # a multiple of the window no longer crash the final reshape.
        new_pooled_matrix_height = int(input_matrix.shape[0] / pooling_operator_shape_tuple[0])
        new_pooled_matrix_width = int(input_matrix.shape[1] / pooling_operator_shape_tuple[1])
        def calculate_start_pooling_coordinate_in_origin_matrix(pooled_shape_tuple, pooling_operator_shape_tuple):
            # Top-left corner of every complete, non-overlapping window.
            start_pooling_coordinate_tuple_list = []
            for row_idx in xrange(pooled_shape_tuple[0]):
                start_height = row_idx * pooling_operator_shape_tuple[0]
                for col_idx in xrange(pooled_shape_tuple[1]):
                    start_pooling_coordinate_tuple_list.append(
                        (start_height, col_idx * pooling_operator_shape_tuple[1]))
            return start_pooling_coordinate_tuple_list
        start_pooling_coordinate_tuple_list = calculate_start_pooling_coordinate_in_origin_matrix(
            pooled_shape_tuple = (new_pooled_matrix_height, new_pooled_matrix_width),
            pooling_operator_shape_tuple = pooling_operator_shape_tuple)
        def calculate_pooled_matrix_according_to_start_pooling_coordinate(start_coord_tuple_list, input_matrix, pooling_mode, pooling_operator_shape_tuple, new_pooled_matrix_shape_tuple):
            # Reduce each window with max/mean and reshape the flat result.
            input_matrix = np.mat(input_matrix)
            op_height, op_width = pooling_operator_shape_tuple
            pooled_input_list = list()
            for start_height, start_width in start_coord_tuple_list:
                window = input_matrix[start_height:start_height + op_height,
                                      start_width:start_width + op_width]
                if pooling_mode == "max":
                    pooled_input_list.append(window.max())
                else:
                    # "mean" -- the only other mode, validated above.
                    pooled_input_list.append(window.mean())
            return np.mat(pooled_input_list).reshape(new_pooled_matrix_shape_tuple[0],
                                                     new_pooled_matrix_shape_tuple[1])
        pooled_input_matrix = calculate_pooled_matrix_according_to_start_pooling_coordinate(
            start_coord_tuple_list = start_pooling_coordinate_tuple_list,
            input_matrix = input_matrix,
            pooling_mode = pooling_mode,
            pooling_operator_shape_tuple = pooling_operator_shape_tuple,
            new_pooled_matrix_shape_tuple = (new_pooled_matrix_height, new_pooled_matrix_width))
        return pooled_input_matrix
    @Decorator.log_of_function
    def overlapping_pooling(self, input_matrix):
        """Placeholder for overlapping pooling (not implemented)."""
        pass
    @Decorator.log_of_function
    def spatial_pyramid_pooling(self, input_matrix):
        """Placeholder for spatial pyramid pooling (not implemented)."""
        pass
################################### PART3 CLASS TEST ##################################
#'''
# Initialization
train_sample_data_dir = "..//data//input//train-images-idx3-ubyte"
train_label_data_dir = "..//data//input//train-labels-idx1-ubyte"
img_save_dir = "../data/output"
img_filename = "raw-relu.jpg"
# Get data and one image matrix
Net = CreateNetwork()
all_image_2d_ndarray = Net.load_image_data_set(img_data_dir = train_sample_data_dir)
input_matrix = all_image_2d_ndarray[0]
#logging.info(input_matrix)
# convolution
#input_matrix = Net.tanh_function(input_matrix = input_matrix)
#input_matrix = Net.softplus_activation_function(input_matrix = input_matrix)
#input_matrix = Net.convolution(input_matrix = input_matrix)
input_matrix = Net.relu_activation_function(input_matrix = input_matrix)
'''
# pooling
input_matrix = Net.general_pooling(input_matrix = input_matrix,\
pooling_mode = "max",\
pooling_operator_shape_tuple = (2, 2))
'''
#"""
Painter = PaintNDarray()
Painter.paint_one_img(img_ndarray = input_matrix,\
dpi = 1)
Painter.save_one_img(img_ndarray = input_matrix,\
img_save_dir = img_save_dir,\
img_filename = img_filename,\
dpi = 100,\
img_shape_tuple = input_matrix.shape)
#"""
#''' | {
"content_hash": "a526c9a7d4cac1abf16bf34884835499",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 217,
"avg_line_length": 46.219718309859154,
"alnum_prop": 0.5997684056557777,
"repo_name": "ysh329/my-first-cnn",
"id": "53005a2388953d5d8adeba870bdac0fb68ca2760",
"size": "16699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycnn/class_create_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40075"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2018 Dietrich Pescoller
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import scope
from .. import scpi
from .. import extra
# Lookup tables translating IVI API identifiers to the SCPI short forms
# sent to the instrument.
AcquisitionTypeMapping = {
        'normal': 'norm',
        'peak_detect': 'peak',
        'high_resolution': 'hres',
        'average': 'aver'}
VerticalCoupling = set(['ac', 'dc'])
TriggerTypeMapping = {
        'edge': 'edge',
        'width': 'glit',
        'glitch': 'glit',
        'tv': 'tv',
        #'immediate': '',
        'ac_line': 'edge',
        'pattern': 'patt',
        'can': 'can',
        'duration': 'dur',
        'i2s': 'i2s',
        'iic': 'iic',
        'eburst': 'ebur',
        'lin': 'lin',
        'm1553': 'm1553',
        'sequence': 'seq',
        'spi': 'spi',
        'uart': 'uart',
        'usb': 'usb',
        'flexray': 'flex'}
# Values are 3-tuples; judging by the key names they encode
# (SCPI coupling, noise-reject flag, HF-reject flag) -- confirm against
# the setter that consumes them.
TriggerCouplingMapping = {
        'ac': ('ac', 0, 0),
        'dc': ('dc', 0, 0),
        'hf_reject': ('dc', 0, 1),
        'lf_reject': ('lfr', 0, 0),
        'noise_reject': ('dc', 1, 0),
        'hf_reject_ac': ('ac', 0, 1),
        'noise_reject_ac': ('ac', 1, 0),
        'hf_noise_reject': ('dc', 1, 1),
        'hf_noise_reject_ac': ('ac', 1, 1),
        'lf_noise_reject': ('lfr', 1, 0)}
TVTriggerEventMapping = {'field1': 'fie1',
        'field2': 'fie2',
        'any_field': 'afi',
        'any_line': 'alin',
        'line_number': 'lfi1',
        'vertical': 'vert',
        'line_field1': 'lfi1',
        'line_field2': 'lfi2',
        'line': 'line',
        'line_alternate': 'lalt',
        'lvertical': 'lver'}
TVTriggerFormatMapping = {'generic': 'gen',
        'ntsc': 'ntsc',
        'pal': 'pal',
        'palm': 'palm',
        'secam': 'sec',
        'p480l60hz': 'p480',
        'p480': 'p480',
        'p720l60hz': 'p720',
        'p720': 'p720',
        'p1080l24hz': 'p1080',
        'p1080': 'p1080',
        'p1080l25hz': 'p1080l25hz',
        'p1080l50hz': 'p1080l50hz',
        'p1080l60hz': 'p1080l60hz',
        'i1080l50hz': 'i1080l50hz',
        'i1080': 'i1080l50hz',
        'i1080l60hz': 'i1080l60hz'}
PolarityMapping = {'positive': 'pos',
        'negative': 'neg'}
GlitchConditionMapping = {'less_than': 'less',
        'greater_than': 'gre'}
WidthConditionMapping = {'within': 'rang'}
SampleModeMapping = {'real_time': 'rtim',
        'equivalent_time': 'etim',
        'segmented': 'segm'}
SlopeMapping = {
        'positive': 'pos',
        'negative': 'neg',
        'either': 'eith',
        'alternating': 'alt'}
# Waveform measurement names; some values carry a trailing qualifier
# ("display"/"cycle") that is passed through as part of the SCPI argument.
MeasurementFunctionMapping = {
        'rise_time': 'risetime',
        'fall_time': 'falltime',
        'frequency': 'frequency',
        'period': 'period',
        'voltage_rms': 'vrms display',
        'voltage_peak_to_peak': 'vpp',
        'voltage_max': 'vmax',
        'voltage_min': 'vmin',
        'voltage_high': 'vtop',
        'voltage_low': 'vbase',
        'voltage_average': 'vaverage display',
        'width_negative': 'nwidth',
        'width_positive': 'pwidth',
        'duty_cycle_positive': 'dutycycle',
        'amplitude': 'vamplitude',
        'voltage_cycle_rms': 'vrms cycle',
        'voltage_cycle_average': 'vaverage cycle',
        'overshoot': 'overshoot',
        'preshoot': 'preshoot',
        'ratio': 'vratio',
        'phase': 'phase',
        'delay': 'delay'}
# Subset of measurements available on digital (logic) channels.
MeasurementFunctionMappingDigital = {
        'rise_time': 'risetime',
        'fall_time': 'falltime',
        'frequency': 'frequency',
        'period': 'period',
        'width_negative': 'nwidth',
        'width_positive': 'pwidth',
        'duty_cycle_positive': 'dutycycle'}
ScreenshotImageFormatMapping = {
        'bmp': 'bmp',
        'bmp24': 'bmp',
        'bmp8': 'bmp8bit',
        'png': 'png',
        'png24': 'png'}
TimebaseModeMapping = {
        'main': 'main',
        'window': 'wind',
        'xy': 'xy',
        'roll': 'roll'}
TimebaseReferenceMapping = {
        'left': 'left',
        'center': 'cent',
        'right': 'righ'}
TriggerModifierMapping = {'none': 'normal', 'auto': 'auto'}
class hmo1002(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
scope.Base, scope.TVTrigger,
scope.GlitchTrigger, scope.WidthTrigger, scope.AcLineTrigger,
scope.WaveformMeasurement, scope.MinMaxWaveform,
scope.ContinuousAcquisition, scope.AverageAcquisition,
scope.SampleMode, scope.TriggerModifier, scope.AutoSetup,
extra.common.SystemSetup, extra.common.Screenshot,
ivi.Driver):
"R&S HMO 1002 Series generic IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # Driver-wide defaults and IVI property registration.
        self.__dict__.setdefault('_instrument_id', '')
        # Channel bookkeeping set up *before* super().__init__ so base-class
        # initialization can see the attributes; re-initialized again below
        # after the super call -- presumably intentional, TODO confirm.
        self._analog_channel_name = list()
        self._analog_channel_count = 2
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._channel_label = list()
        self._channel_probe_skew = list()
        self._channel_scale = list()
        self._channel_trigger_level = list()
        self._channel_invert = list()
        self._channel_probe_id = list()
        self._channel_bw_limit = list()
        super(hmo1002, self).__init__(*args, **kwargs)
        self._self_test_delay = 40
        self._memory_size = 10
        self._analog_channel_name = list()
        self._analog_channel_count = 2
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 1e9
        # Display/timebase defaults (seconds unless noted).
        self._horizontal_divisions = 10
        self._vertical_divisions = 8
        self._acquisition_segmented_count = 2
        self._acquisition_segmented_index = 1
        self._timebase_mode = 'main'
        self._timebase_reference = 'center'
        self._timebase_position = 0.0
        self._timebase_range = 1e-3
        self._timebase_scale = 100e-6
        self._timebase_window_position = 0.0
        self._timebase_window_range = 5e-6
        self._timebase_window_scale = 500e-9
        self._display_screenshot_image_format_mapping = ScreenshotImageFormatMapping
        self._display_vectors = True
        self._display_labels = True
        # IVI identity metadata.
        self._identity_description = "HMO 1002 generic IVI oscilloscope driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Rohde Schwarz Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['HMO1002']
        # Dynamic IVI properties/methods. The getter/setter callables
        # (_get_channel_scale, _passfail_clear, ...) are defined elsewhere
        # in this class or its bases.
        self._add_property('channels[].scale',
                        self._get_channel_scale,
                        self._set_channel_scale,
                        None,
                        ivi.Doc("""
                        Specifies the vertical scale, or units per division, of the channel. Units
                        are volts.
                        """))
        self._add_property('timebase.position',
                        self._get_timebase_position,
                        self._set_timebase_position,
                        None,
                        ivi.Doc("""
                        Sets the time interval between the trigger event and the display reference
                        point on the screen. The display reference point is either left, right, or
                        center and is set with the timebase.reference property. The maximum
                        position value depends on the time/division settings.
                        """))
        self._add_property('timebase.range',
                        self._get_timebase_range,
                        self._set_timebase_range,
                        None,
                        ivi.Doc("""
                        Sets the full-scale horizontal time in seconds for the main window. The
                        range is 10 times the current time-per-division setting.
                        """))
        self._add_property('timebase.scale',
                        self._get_timebase_scale,
                        self._set_timebase_scale,
                        None,
                        ivi.Doc("""
                        Sets the horizontal scale or units per division for the main window.
                        """))
        self._add_method('passfail.clear',
                        self._passfail_clear,
                        ivi.Doc("""
                        Clears the pass fail counter
                        """))
        self._add_method('passfail.start',
                        self._passfail_start,
                        ivi.Doc("""
                        start the pass fail test
                        """))
        self._add_method('passfail.stop',
                        self._passfail_stop,
                        ivi.Doc("""
                        stops the pass fail test
                        """))
        self._add_property('passfail.total',
                        self._get_passfail_total,
                        None,
                        None,
                        ivi.Doc("""
                        Returns number of perfomed test
                        """))
        self._add_property('passfail.failed',
                        self._get_passfail_failed,
                        None,
                        None,
                        ivi.Doc("""
                        Returns number of failed test
                        """))
        self._add_property('passfail.passed',
                        self._get_passfail_passed,
                        None,
                        None,
                        ivi.Doc("""
                        Returns number of passed test
                        """))
        self._init_channels()
def _initialize(self, resource=None, id_query=False, reset=False, **keywargs):
"Opens an I/O session to the instrument."
self._channel_count = self._analog_channel_count + self._digital_channel_count
super(hmo1002, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility.reset()
    def _utility_disable(self):
        # No-op placeholder for the IVI utility.disable operation.
        pass
    def _utility_lock_object(self):
        # No-op: this driver does not implement instrument locking.
        pass
    def _utility_unlock_object(self):
        # No-op counterpart of _utility_lock_object.
        pass
    def _init_channels(self):
        """Build the per-channel state lists for the analog and digital channels.

        Rebuilds every per-channel list from scratch, so it must run before any
        channel property is accessed.  Order matters: analog channels are
        appended first, then the digital ones.
        """
        try:
            super(hmo1002, self)._init_channels()
        except AttributeError:
            # Base class may not define _init_channels; that is fine.
            pass
        self._channel_name = list()
        self._channel_label = list()
        self._channel_probe_skew = list()
        self._channel_invert = list()
        self._channel_probe_id = list()
        self._channel_scale = list()
        self._channel_trigger_level = list()
        self._channel_bw_limit = list()
        self._analog_channel_name = list()
        # Analog channels are 1-based in the instrument's naming scheme.
        for i in range(self._analog_channel_count):
            self._channel_name.append("channel%d" % (i + 1))
            self._channel_label.append("%d" % (i + 1))
            self._analog_channel_name.append("channel%d" % (i + 1))
            self._channel_probe_skew.append(0)
            self._channel_scale.append(1.0)
            self._channel_trigger_level.append(0.0)
            self._channel_invert.append(False)
            self._channel_probe_id.append("NONE")
            self._channel_bw_limit.append(False)
        # digital channels
        # Digital channels are 0-based ("digital0", label "D0", ...).
        self._digital_channel_name = list()
        if (self._digital_channel_count > 0):
            for i in range(self._digital_channel_count):
                self._channel_name.append("digital%d" % i)
                self._channel_label.append("D%d" % i)
                self._digital_channel_name.append("digital%d" % i)
            # Fixed electrical characteristics for the digital inputs; these
            # lists are assumed to be pre-sized by the base class -- the
            # indices start where the analog channels end.
            for i in range(self._analog_channel_count, self._channel_count):
                self._channel_input_impedance[i] = 100000
                self._channel_input_frequency_max[i] = 1e9
                self._channel_probe_attenuation[i] = 1
                self._channel_coupling[i] = 'dc'
                self._channel_offset[i] = 0
                self._channel_range[i] = 1
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self.channels._set_list(self._channel_name)
def _system_fetch_setup(self):
if self._driver_operation_simulate:
return b''
self._write_raw(b'SYST:SET?')
return self._read_raw()
    def _system_load_setup(self, data):
        """Restore an instrument setup previously fetched with _system_fetch_setup.

        :param data: raw binary setup blob as produced by the instrument.
        """
        if self._driver_operation_simulate:
            return
        self._write_raw(b'SYST:SET ' + data)
        # temporary patch
        # Write a perturbed mask X width and then restore the original value.
        # NOTE(review): presumably this forces the scope to refresh its mask
        # state after the setup load -- confirm against firmware behaviour.
        xWidth = self._ask('MASK:XWIDth?')
        self._write('MASK:XWIDth ' + str(float(xWidth)*1.1))
        self._write('MASK:XWIDth ' + xWidth)
        # Bytes 2563/2564 of the blob are read as a two-character mask id and
        # the corresponding mask file is reloaded from internal storage.
        # TODO(review): confirm these offsets against the HMO1002 SYST:SET format.
        self._write('mask:load "/INT/REFERENCE/MSK%s.HMK"' % (chr(data[2563])+chr(data[2564])) )
        # All cached attributes may be stale after a full setup load.
        self.driver_operation.invalidate_all_attributes()
        return
def _get_timebase_position(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_position = float(self._ask("timebase:position?"))
self._set_cache_valid()
return self._timebase_position
def _set_timebase_position(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("timebase:position %e" % value)
self._timebase_position = value
self._set_cache_valid()
self._set_cache_valid(False, 'timebase_window_position')
def _get_timebase_range(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_range = float(self._ask("timebase:range?"))
self._timebase_scale = self._timebase_range / self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(True, 'timebase_scale')
return self._timebase_range
def _set_timebase_range(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("timebase:range %e" % value)
self._timebase_range = value
self._timebase_scale = value / self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(True, 'timebase_scale')
self._set_cache_valid(False, 'timebase_window_scale')
self._set_cache_valid(False, 'timebase_window_range')
def _get_timebase_scale(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_scale = float(self._ask("timebase:scale?"))
self._timebase_range = self._timebase_scale * self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(True, 'timebase_range')
return self._timebase_scale
def _set_timebase_scale(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("timebase:scale %e" % value)
self._timebase_scale = value
self._timebase_range = value * self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(True, 'timebase_range')
self._set_cache_valid(False, 'timebase_window_scale')
self._set_cache_valid(False, 'timebase_window_range')
def _get_channel_range(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_range[index] = float(self._ask(":%s:range?" % self._channel_name[index]))
self._channel_scale[index] = self._channel_range[index] / self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_scale", index)
return self._channel_range[index]
def _set_channel_range(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:range %e" % (self._channel_name[index], value))
self._channel_range[index] = value
self._channel_scale[index] = value / self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_scale", index)
self._set_cache_valid(False, "channel_offset", index)
def _get_channel_scale(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_scale[index] = float(self._ask(":%s:scale?" % self._channel_name[index]))
self._channel_range[index] = self._channel_scale[index] * self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_range", index)
return self._channel_scale[index]
def _set_channel_scale(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:scale %e" % (self._channel_name[index], value))
self._channel_scale[index] = value
self._channel_range[index] = value * self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_range", index)
self._set_cache_valid(False, "channel_offset", index)
    def _get_measurement_status(self):
        # Return the locally tracked acquisition status; no instrument query.
        return self._measurement_status
def _measurement_auto_setup(self):
if not self._driver_operation_simulate:
self._write(":autoscale")
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write('*rst')
    def _passfail_clear(self):
        # Reset the mask-test counters.
        # NOTE(review): this writes 'mask:res:coun' while the count queries
        # below use 'mask:coun?' -- confirm against the HMO1002 SCPI manual.
        if not self._driver_operation_simulate:
            self._write('mask:res:coun')
def _passfail_start(self):
if not self._driver_operation_simulate:
self._write('mask:test run')
def _passfail_stop(self):
if not self._driver_operation_simulate:
self._write('mask:test stop')
def _get_passfail_total(self):
if not self._driver_operation_simulate:
value = int(self._ask(":mask:coun?"))
return value
else:
return 0
def _get_passfail_failed(self):
if not self._driver_operation_simulate:
value = int(self._ask(":mask:vco?"))
return value
else:
return 0
    def _get_passfail_passed(self):
        # Passed = total runs minus failed runs; both values are queried live.
        return self._get_passfail_total() - self._get_passfail_failed()
| {
"content_hash": "3747cca01f83670348aeaec9d0ceffec",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 99,
"avg_line_length": 38.94285714285714,
"alnum_prop": 0.5569087796527268,
"repo_name": "Diti24/python-ivi",
"id": "73fe65d7c157df1a3d7072dcb06888400ef750e3",
"size": "20445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/rohdeschwarz/hmo1002.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
import utool as ut
def run_tests():
    """Discover the ibeis_cnn doctestable modules, run them, and report.

    Returns 0 when every doctest passes and 1 otherwise (shell exit-code
    convention), matching the original behaviour.
    """
    # Build module list and run tests
    import importlib
    ut.change_term_title('RUN IBEIS_CNN TESTS')
    # Doctest file names to exclude (none at the moment).
    exclude_doctests_fnames = set()
    exclude_dirs = [
        '_broken',
        #'old',
        #'tests',
        #'timeits',
        #'_scripts',
        #'_timeits',
        #'_doc',
        #'notebook',
    ]
    dpath_list = ['ibeis_cnn']
    doctest_modname_list = ut.find_doctestable_modnames(
        dpath_list, exclude_doctests_fnames, exclude_dirs)
    # importlib.import_module is clearer and safer than exec()-ing an import
    # statement, and it registers each module in sys.modules just the same.
    for modname in doctest_modname_list:
        importlib.import_module(modname)
    module_list = [sys.modules[name] for name in doctest_modname_list]
    nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)
    return 0 if nPass == nTotal else 1
if __name__ == '__main__':
    import multiprocessing
    # Required for frozen executables on Windows; harmless elsewhere.
    multiprocessing.freeze_support()
    retcode = run_tests()
    sys.exit(retcode)
| {
"content_hash": "9e76d3de1c213405b21855b2295ec958",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 26.512820512820515,
"alnum_prop": 0.5986460348162476,
"repo_name": "bluemellophone/ibeis_cnn",
"id": "e5358f0922c010f0ebf923b61581aa0f5e269f76",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "667619"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
""" Calculate and manipuate kmer
"""
import re
import itertools
import math
from collections import deque
from collections import Counter
import numpy as np
import Bio.SeqIO as SeqIO
# Module-wide constants.
KMER_ARR = ["A", "C", "G", "T"]  # nucleotide alphabet, in encoding order
KSIZE_LIMIT = 31  # largest supported k-mer size
# Regex matching any character that is NOT a plain nucleotide; used to
# clean sequence data before k-mer extraction.
NUCLEOTIDE_REGEX = re.compile("[^ATGC]")


def id_fn(x):
    """Identity collapse function: return *x* unchanged.

    A named ``def`` instead of the original ``id_fn = lambda x: x``
    assignment (PEP 8 / flake8 E731); behaviour is identical.
    """
    return x
def kmer_location(kmer):
    """Map *kmer* to its storage slot in a flat array.

    With this 1-based digit scheme "AAAA" does not land at index 0: the
    offset leaves room for every shorter k-mer.
    NOTE: Not compatible with decode.
    """
    digit = {"A": 1, "C": 2, "G": 3, "T": 4}
    slot = 0
    for nucleotide in kmer:
        slot = slot * 4 + digit[nucleotide]
    return slot
def encode(k):
    """Pack the nucleotide string *k* into a base-4 integer.

    Returns a ``(code, length)`` pair; the length is needed by decode()
    because leading 'A's encode as leading zero digits.
    """
    digit = {"A": 0, "C": 1, "G": 2, "T": 3}
    code = 0
    for base in k:
        # Shift two bits left and OR in the next digit (== code * 4 + digit).
        code = (code << 2) | digit[base]
    return code, len(k)
def decode(code, length):
    """Inverse of encode(): rebuild the nucleotide string from a hash.

    ``length`` disambiguates leading 'A's, which encode as leading zeros.
    """
    bases = ["A", "C", "G", "T"]
    chars = []
    for _ in range(length):
        # Peel off the lowest base-4 digit each iteration.
        chars.append(bases[code & 3])
        code >>= 2
    return ''.join(reversed(chars))
def guess_front(ksize):
    """Bin edges for recovering a k-mer's first character from its hash.

    Usage: ``np.digitize([h], guess_front(k))`` yields 0=A, 1=C, 2=G, 3=T.
    """
    # The largest hash whose k-mer starts with 'A' is 'A' followed by all 'T's.
    a_max = encode("A" + ("T" * (ksize - 1)))[0]
    c_max = a_max * 2 + 1
    g_max = a_max * 3 + 2
    return np.array([a_max + 1, c_max + 1, g_max + 1])
def trimFront(khashs, ksize):
    """Drop the first character of each k-mer, given their hashes.

    Args:
        khashs (np.array): array of k-mer hashes produced by encode().
        ksize (int): k-mer length the hashes were computed with.

    Returns:
        np.array: hashes of the (k-1)-mers with the first base removed.
    """
    bins = guess_front(ksize)
    # Recover the encoded value (0..3) of each k-mer's first character.
    if ksize < 27:
        frontCharHash = np.digitize(khashs, bins)
    else: # digitize have problem with a very large number.
        # searchsorted(side='right') computes the same bin index but copes
        # with integers too large for digitize.
        frontCharHash = np.searchsorted(bins, khashs, side='right')
    # Subtract the first character's contribution: it occupies the top base-4
    # digit, whose weight is 4**(ksize - 1).
    fHash = khashs - (frontCharHash * (4 ** (ksize - 1)))
    return fHash
def trimBack(khashs):
    """Drop the last character of each k-mer hash.

    The final base occupies the lowest base-4 digit, so integer floor
    division by four removes it.

    Args:
        khashs (np.array): array of k-mer hashes.
    """
    trimmed = khashs // 4
    return trimmed
def generateMers(size=4):
    """Yield every possible k-mer of the given size, in alphabet order."""
    for combo in itertools.product(KMER_ARR, repeat=size):
        yield "".join(combo)
def create_kmer_loc_fn(size):
    """Build a zero-based location hash for k-mers of a fixed *size*.

    NOTE: closely related to encode(); may be refactored later.
    """
    # All k-mers of this size map at or above the slot of "A"*size, so
    # subtracting it rebases the range to start at zero.
    base = kmer_location("A" * size)

    def locate(seq):
        return kmer_location(seq) - base

    return locate
def kmer_hash_emit(seq, ksize, keyfn=encode):
    """Yield the hash of every k-mer in *seq*, left to right.

    Args:
        seq (string): cleaned, uppercase nucleotide sequence.
        ksize (int): size of k-mer to calculate.
        keyfn (func): hash function returning a tuple whose first element
            is the hash value.
    """
    last_start = len(seq) - ksize
    for start in range(last_start + 1):
        yield keyfn(seq[start:start + ksize])[0]
def kmer_hash_emit_rolling(seq, ksize):
    """Yield the hash of every k-mer in *seq* using a rolling update.

    Optimised variant of kmer_hash_emit: instead of re-encoding each
    window, the hash is updated in O(1) per step by removing the leftmost
    base's contribution and appending the new base.

    Args:
        seq (string): nucleotide sequence without N.
        ksize (int): size of k-mer to calculate.
    """
    base_value = {"A": 0, "C": 1, "G": 2, "T": 3}
    # Bounded deque: appending automatically evicts the leftmost base.
    window = deque(seq[0:ksize], ksize)
    h = encode(window)[0]
    yield h
    top_weight = 4 ** (ksize - 1)  # weight of the leftmost base-4 digit
    for ch in seq[ksize:]:
        # Remove the leftmost base, shift the remaining digits up one
        # position, then add the incoming base in the lowest digit.
        h -= base_value[window[0]] * top_weight
        h *= 4
        window.append(ch)
        h += base_value[window[-1]]
        yield h
"content_hash": "e92bf38beeba618014abc97509e60c36",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 118,
"avg_line_length": 23.049079754601227,
"alnum_prop": 0.5765238221985627,
"repo_name": "yumyai/ksiga",
"id": "af5f92a72cdf99366205a94917dee5f264bea70a",
"size": "3804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ksiga/kmerutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "146"
},
{
"name": "Python",
"bytes": "57019"
}
],
"symlink_target": ""
} |
"""
Created on Mar 09, 2016
@author: Tyranic-Moron
"""
import re
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
class Var(CommandInterface):
    triggers = ['var']
    help = "var <varname> <value> - sets <varname> to <value>, which can be accessed later using $<varname>. " \
           "the variables don't persist between messages, so it is only useful as a support function for aliases using sub and/or chain"

    def execute(self, message):
        """Store a transient variable exposed to later commands via extraVars.

        @type message: IRCMessage
        """
        if not message.ParameterList:
            return IRCResponse(ResponseType.Say, "You didn't give a variable name!", message.ReplyTo)

        varname = message.ParameterList[0]
        # Everything after the first raw token is the value.
        tokens = message.Parameters.split(' ')
        value = u' '.join(tokens[1:])
        return IRCResponse(ResponseType.Say, "", message.ReplyTo, extraVars={varname: value})
| {
"content_hash": "9b765c4fe61fc955abaccdb9a079f28e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 136,
"avg_line_length": 35.407407407407405,
"alnum_prop": 0.6631799163179917,
"repo_name": "Heufneutje/PyMoronBot",
"id": "f65d30044c165843808796d710c41b85a041f643",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Commands/Var.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "249471"
}
],
"symlink_target": ""
} |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
LANGUAGE_CODE = 'en'
LANGUAGES = [
('en', 'English'),
('de', 'Deutsch'),
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'multilingual_tags.tests.test_app.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
)
}
}]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
'hvad',
]
INTERNAL_APPS = [
'multilingual_tags',
'multilingual_tags.tests.test_app',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
SECRET_KEY = 'foobar'
| {
"content_hash": "c420d88c68109afa1105da3a5c815210",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 65,
"avg_line_length": 23.2027027027027,
"alnum_prop": 0.615608619685498,
"repo_name": "bitmazk/django-multilingual-tags",
"id": "13c8b04deaa518ad9357cbd66897d00f70b4d571",
"size": "1717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual_tags/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4225"
},
{
"name": "JavaScript",
"bytes": "15200"
},
{
"name": "Makefile",
"bytes": "327"
},
{
"name": "Python",
"bytes": "36436"
}
],
"symlink_target": ""
} |
import unittest
from os.path import join
from unittest import mock
from tests.recipes.recipe_lib_test import RecipeCtx
class TestPandasRecipe(RecipeCtx, unittest.TestCase):
    """
    TestCase for recipe :mod:`~pythonforandroid.recipes.pandas`
    """

    recipe_name = "pandas"

    # Decorators apply bottom-up, so the mock arguments below are listed in
    # the reverse order of the @mock.patch lines.
    @mock.patch("pythonforandroid.recipe.Recipe.check_recipe_choices")
    @mock.patch("pythonforandroid.build.ensure_dir")
    @mock.patch("pythonforandroid.archs.glob")
    @mock.patch("pythonforandroid.archs.find_executable")
    def test_get_recipe_env(
        self,
        mock_find_executable,
        mock_glob,
        mock_ensure_dir,
        mock_check_recipe_choices,
    ):
        """
        Test that method
        :meth:`~pythonforandroid.recipes.pandas.PandasRecipe.get_recipe_env`
        returns the expected flags
        """
        # Stub out toolchain discovery so no NDK needs to be installed.
        mock_find_executable.return_value = (
            "/opt/android/android-ndk/toolchains/"
            "llvm/prebuilt/linux-x86_64/bin/clang"
        )
        mock_glob.return_value = ["llvm"]
        mock_check_recipe_choices.return_value = sorted(
            self.ctx.recipe_build_order
        )

        # pandas' build needs the numpy headers from the target install dir.
        numpy_includes = join(
            self.ctx.get_python_install_dir(self.arch.arch), "numpy/core/include",
        )
        env = self.recipe.get_recipe_env(self.arch)
        self.assertIn(numpy_includes, env["NUMPY_INCLUDES"])
        self.assertIn(" -landroid", env["LDFLAGS"])

        # make sure that the mocked methods are actually called
        mock_glob.assert_called()
        mock_ensure_dir.assert_called()
        mock_find_executable.assert_called()
        mock_check_recipe_choices.assert_called()
| {
"content_hash": "cf628adb883f09769a50e4476b125401",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 82,
"avg_line_length": 32.64705882352941,
"alnum_prop": 0.639039039039039,
"repo_name": "PKRoma/python-for-android",
"id": "3ac34d1d3b640456532027b361c88b567161b2b4",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/recipes/test_pandas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "65170"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3107"
},
{
"name": "Cython",
"bytes": "15033"
},
{
"name": "Dockerfile",
"bytes": "3040"
},
{
"name": "HTML",
"bytes": "4330"
},
{
"name": "Java",
"bytes": "134825"
},
{
"name": "Makefile",
"bytes": "10159"
},
{
"name": "Python",
"bytes": "784620"
},
{
"name": "Shell",
"bytes": "1499"
},
{
"name": "kvlang",
"bytes": "17453"
}
],
"symlink_target": ""
} |
"""
manage.py,
copyright (c) 2015 by Stefan Lehmann,
licensed under the MIT license
"""
import sys
import eventlet
import os
from app import create_app, db
from app.models import TempCtrlSettings, ProcessData, Step, Receipe
from app.control import tempcontroller
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
# only perform monkey patch in run mode otherwise
# shell won't be usable
if 'run' in sys.argv:
    eventlet.monkey_patch()

# Application factory: config name comes from FLASK_CONFIG or falls back
# to 'default'.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Objects pre-imported into the ``manage.py shell`` session."""
    return {
        'app': app,
        'db': db,
        'TempCtrlSettings': TempCtrlSettings,
        'tempcontroller': tempcontroller,
        'ProcessData': ProcessData,
        'Step': Step,
        'Receipe': Receipe,
    }
# Register the interactive shell (with pre-populated context) and the
# Flask-Migrate database commands.
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def run(host='0.0.0.0', port=5000, user_reloader=False):
    """
    Run the Flask development server with websocket support.
    """
    # NOTE(review): 'user_reloader' looks like a typo for 'use_reloader';
    # renaming it would change the generated CLI option name, so it is
    # flagged here rather than changed.
    port = int(port)
    # Imported here, after module import time -- presumably so that
    # eventlet.monkey_patch() (run when 'run' is on the command line) takes
    # effect before socketio loads; confirm before reordering.
    from app import socketio
    socketio.run(
        app,
        host=host,
        port=port,
        use_reloader=user_reloader
    )
@manager.command
def test():
    """ Run unit tests """
    import unittest
    suite = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
    # Dispatch to the Flask-Script command named on the command line.
    manager.run()
| {
"content_hash": "51f0b21e117359ead583a14e2efa55e9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 71,
"avg_line_length": 23.11111111111111,
"alnum_prop": 0.6936813186813187,
"repo_name": "MrLeeh/brewctrl",
"id": "f88b34b5892db0e2c0d31d9e522a052834eb32c2",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "49386"
},
{
"name": "CSS",
"bytes": "651732"
},
{
"name": "HTML",
"bytes": "7878028"
},
{
"name": "Java",
"bytes": "13143"
},
{
"name": "JavaScript",
"bytes": "121781"
},
{
"name": "PHP",
"bytes": "149259"
},
{
"name": "Python",
"bytes": "33188"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
from integration_tests.framework import env
def setup_package():
    # Package-level fixture: create the shared manager test environment
    # once before any test in this package runs.
    env.create_env(env.ManagerTestEnvironment)
def teardown_package():
    # Package-level fixture: tear the shared environment down after the
    # last test in this package has run.
    env.destroy_env()
| {
"content_hash": "911d47ecc41373b917f67084a7bfdbef",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 46,
"avg_line_length": 18,
"alnum_prop": 0.7469135802469136,
"repo_name": "isaac-s/cloudify-manager",
"id": "1ac3c6bf0ad097e65c1da63056f20e66af8c0061",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration_tests/tests/manager_tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Mako",
"bytes": "541"
},
{
"name": "Python",
"bytes": "1793118"
},
{
"name": "Ruby",
"bytes": "40193"
},
{
"name": "Shell",
"bytes": "41526"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from os.path import join, dirname
from textx import metamodel_from_str
from textx.export import metamodel_export, model_export
# textX grammar for a tiny calculator language: a sequence of variable
# assignments followed by one final expression.  Operator precedence is
# encoded structurally (Expression -> Term -> Factor -> Operand).
grammar = '''
Calc: assignments*=Assignment expression=Expression;
Assignment: variable=ID '=' expression=Expression ';';
Expression: op=Term (op=PlusOrMinus op=Term)* ;
PlusOrMinus: '+' | '-';
Term: op=Factor (op=MulOrDiv op=Factor)*;
MulOrDiv: '*' | '/' ;
Factor: (sign=PlusOrMinus)? op=Operand;
Operand: op=NUMBER | op=ID | ('(' op=Expression ')');
'''

# Global variable namespace
# Maps variable names to their evaluated values; filled in by Calc.value.
namespace = {}
class Calc(object):
    """Model root: a list of assignments plus one final expression."""

    def __init__(self, **kwargs):
        self.assignments = kwargs.pop('assignments')
        self.expression = kwargs.pop('expression')

    @property
    def value(self):
        """Evaluate the program and return the final expression's value."""
        # Variables must be evaluated in definition order, since later
        # assignments may reference earlier ones via the global namespace.
        for assignment in self.assignments:
            namespace[assignment.variable] = assignment.expression.value
        return self.expression.value
class ExpressionElement(object):
    """Common base for the expression model classes.

    textX instantiates these with keyword arguments: an optional 'parent'
    (parent-child relationship) and the 'op' attribute that every grammar
    rule defines.
    """

    def __init__(self, **kwargs):
        # 'parent' defaults to None for root elements.
        self.parent = kwargs.get('parent')
        # Every grammar rule stores its operand(s) under 'op'.
        self.op = kwargs['op']
        super(ExpressionElement, self).__init__()
class Factor(ExpressionElement):
    """An optionally signed operand."""

    def __init__(self, **kwargs):
        # The grammar's sign is optional; default to '+' when absent.
        self.sign = kwargs.pop('sign', '+')
        super(Factor, self).__init__(**kwargs)

    @property
    def value(self):
        """Operand value, negated when the sign is '-'."""
        magnitude = self.op.value
        if self.sign == '-':
            return -magnitude
        return magnitude
class Term(ExpressionElement):
    """Product/quotient of factors.

    self.op alternates operands and operator symbols:
    [factor, '*', factor, '/', factor, ...].
    """

    @property
    def value(self):
        result = self.op[0].value
        operators = self.op[1::2]
        operands = self.op[2::2]
        for symbol, operand in zip(operators, operands):
            if symbol == '*':
                result = result * operand.value
            else:
                result = result / operand.value
        return result
class Expression(ExpressionElement):
    """Sum/difference of terms.

    self.op alternates operands and operator symbols:
    [term, '+', term, '-', term, ...].
    """

    @property
    def value(self):
        result = self.op[0].value
        operators = self.op[1::2]
        operands = self.op[2::2]
        for symbol, operand in zip(operators, operands):
            if symbol == '+':
                result = result + operand.value
            else:
                result = result - operand.value
        return result
class Operand(ExpressionElement):
    """Expression leaf: a numeric literal, a variable reference, or a
    parenthesised sub-expression."""

    @property
    def value(self):
        op = self.op
        # Exact type check against int/float, kept as in the original.
        if type(op) in {int, float}:
            return op
        if isinstance(op, ExpressionElement):
            # Parenthesised sub-expression: recurse.
            return op.value
        if op in namespace:
            # Variable reference resolved from the global namespace.
            return namespace[op]
        raise Exception('Unknown variable "{}" at position {}'
                        .format(op, self._tx_position))
def main(debug=False):
    """Build the calculator metamodel, evaluate a sample program, print the result.

    :param debug: when True, also export the metamodel and model as .dot files
        next to this script.
    """
    calc_mm = metamodel_from_str(grammar,
                                 classes=[Calc, Expression, Term, Factor,
                                          Operand],
                                 debug=debug)

    this_folder = dirname(__file__)
    if debug:
        metamodel_export(calc_mm, join(this_folder, 'calc_metamodel.dot'))

    input_expr = '''
    a = 10;
    b = 2 * a + 17;
    -(4-1)*a+(2+4.67)+b*5.89/(.2+7)
    '''

    model = calc_mm.model_from_str(input_expr)
    if debug:
        model_export(model, join(this_folder, 'calc_model.dot'))

    # Getting value property from the Calc instance will start evaluation.
    result = model.value
    # Use abs(): the original one-sided check '(value - expected) < eps'
    # would accept ANY result smaller than the expected value.
    assert abs(model.value - 6.93805555) < 0.0001
    print("Result is", result)
if __name__ == '__main__':
    # Run the example without debug exports.
    main()
| {
"content_hash": "ad49bf61c9ec0243311b52febb2383a4",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 74,
"avg_line_length": 27.480314960629922,
"alnum_prop": 0.5727793696275072,
"repo_name": "igordejanovic/textX",
"id": "71d13b884ded2c85762372ecf3706c4128439547",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/expression/calc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "93"
},
{
"name": "Brainfuck",
"bytes": "87"
},
{
"name": "C",
"bytes": "96"
},
{
"name": "Limbo",
"bytes": "634"
},
{
"name": "Python",
"bytes": "391212"
},
{
"name": "Shell",
"bytes": "1094"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from server.models import *
from django.db import models, migrations
import plistlib
def enable_plugins(apps, schema_editor):
    """Seed the default 'InstallReport' report on first migration.

    Uses the historical model via ``apps.get_model`` (never the live model
    import) and only creates the report when no Report rows exist yet, so
    re-running the migration is a no-op.  The unused Machine and
    InstalledUpdate lookups and the commented-out ShardReport code from the
    original were removed.
    """
    Report = apps.get_model("server", "Report")
    if Report.objects.all().count() == 0:
        install_report = Report(name='InstallReport')
        install_report.save()
class Migration(migrations.Migration):
    # Data migration seeding the default reports via enable_plugins.

    dependencies = [
        ('server', '0029_auto_20160210_1316'),
    ]

    operations = [
        # Forward-only: no reverse function is provided.
        migrations.RunPython(enable_plugins),
    ]
| {
"content_hash": "89e3dcc0d14c3c9d466288a015a570eb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 24.64516129032258,
"alnum_prop": 0.6570680628272252,
"repo_name": "erikng/sal",
"id": "3a9bea2d8ef684e88b8ea3d59f34f17f21077e0b",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/migrations/0030_auto_20160212_1810.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "254975"
},
{
"name": "HTML",
"bytes": "248381"
},
{
"name": "JavaScript",
"bytes": "1148377"
},
{
"name": "Makefile",
"bytes": "2208"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "757954"
},
{
"name": "Shell",
"bytes": "5922"
}
],
"symlink_target": ""
} |
import logging
import os
import mysql.connector
# Configure root logging once at import time: DEBUG level with a
# timestamped, file/line-annotated message format.
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',datefmt='%a, %d %b %Y %H:%M:%S')
class DB(object):
    """
    :Some operations about databases

    Thin helper around mysql.connector.  Connection settings are read from
    the environment (DB_USER, DB_PASS, DB_HOST, DB_PORT, DB_NAME) with
    development defaults.  All query helpers swallow exceptions (logging
    them at DEBUG and closing the connection), matching the original
    behaviour, but three defects are fixed:

    * delete_mysql never committed, so DELETEs were rolled back when the
      connection closed (mysql.connector autocommit is off by default);
    * every except block referenced ``cur`` even when ``conn.cursor()``
      itself failed, raising UnboundLocalError and masking the real error;
    * select_mysql returned from inside ``try``, so its ``else`` clause
      never ran and the cursor leaked on every successful query.
    """

    def __init__(self):
        """Build the connection config from environment variables."""
        logging.debug("DB init....")
        self.db_user = os.environ.get('DB_USER', 'root')
        self.db_pass = os.environ.get('DB_PASS', 'root')
        self.db_host = os.environ.get('DB_HOST', '192.168.1.11')
        self.db_port = os.environ.get('DB_PORT', 3366)
        self.db_name = os.environ.get('DB_NAME', 'scale')
        self.config = {
            'user': self.db_user,
            'password': self.db_pass,
            'host': self.db_host,
            'port': self.db_port,
            'database': self.db_name,
            'charset': 'utf8'}
        logging.debug("The config:{}".format(self.config))

    def connect_mysql(self):
        """Open and return a new MySQL connection, or None on failure."""
        try:
            logging.debug("config in connect_mysql():{}".format(self.config))
            conn = mysql.connector.connect(**self.config)
            return conn
        except Exception as e:
            logging.debug(e)

    def select_mysql(self, conn, sql):
        """Run a SELECT and return all rows, or None on error.

        On error the connection is closed (matching the other helpers);
        on success only the cursor is closed.
        """
        logging.debug("sql={}".format(sql))
        cur = None
        try:
            cur = conn.cursor()
            cur.execute(sql)
            rows = cur.fetchall()
        except Exception as e:
            # 'cur' may still be None if conn.cursor() itself failed.
            if cur is not None:
                cur.close()
            conn.close()
            logging.debug(e)
        else:
            # Fetch before closing so the cursor is always released.
            cur.close()
            return rows

    def insert_mysql(self, conn, sql):
        """Run an INSERT and commit; on error the connection is closed."""
        logging.debug("sql={}".format(sql))
        cur = None
        try:
            cur = conn.cursor()
            cur.execute(sql)
            conn.commit()
        except Exception as e:
            if cur is not None:
                cur.close()
            conn.close()
            logging.debug(e)
        else:
            cur.close()

    def update_mysql(self, conn, sql):
        """Run an UPDATE and commit; on error the connection is closed."""
        logging.debug("sql={}".format(sql))
        cur = None
        try:
            cur = conn.cursor()
            cur.execute(sql)
            conn.commit()
        except Exception as e:
            if cur is not None:
                cur.close()
            conn.close()
            logging.debug(e)
        else:
            cur.close()

    def delete_mysql(self, conn, sql):
        """Run a DELETE and commit the transaction.

        The commit was missing in the original, so deletes were silently
        rolled back when the connection closed.
        """
        logging.debug("sql={}".format(sql))
        cur = None
        try:
            cur = conn.cursor()
            cur.execute(sql)
            conn.commit()
        except Exception as e:
            if cur is not None:
                cur.close()
            conn.close()
            logging.debug(e)
        else:
            cur.close()

    def close_mysql(self, conn):
        """Close the connection, ignoring (but logging) any error."""
        try:
            conn.close()
        except Exception as e:
            logging.debug(e)
if __name__=="__main__":
db=DB()
conn=db.connect_mysql()
result=db.select_mysql(conn,"select * from quota_info")
print(result)
| {
"content_hash": "2105d09dc7c78a3ef2f54ab4d0734f81",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 149,
"avg_line_length": 28.081632653061224,
"alnum_prop": 0.5079941860465116,
"repo_name": "bc-dcos/autoscaling",
"id": "a21e76ccffd39514279194be0c4a60163bfbf90c",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_operation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "19451"
},
{
"name": "Python",
"bytes": "81152"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
    def __init__(self, plotly_name="meta", parent_name="funnelarea", **kwargs):
        """Validator for the funnelarea trace's ``meta`` attribute."""
        # Pull defaults out of kwargs first so remaining entries pass through.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(MetaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "84b43bfd9b99ca67205df3619eff0088",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 37.15384615384615,
"alnum_prop": 0.5921325051759835,
"repo_name": "plotly/python-api",
"id": "3fa079924fa3f29a5ef69895fd0477c2917181f7",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnelarea/_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from applications.assets import views
urlpatterns = patterns(
    '',
    # Equipment CRUD: list, create, detail, update, delete.
    # NOTE(review): (?P<pk>\d) matches a single digit only, so objects with
    # pk >= 10 would 404 -- verify whether \d+ was intended.
    url(r'^equipment/$', views.EquipmentViewList.as_view(), name='equipment_list'),
    url(r'^equipment/create/$', views.EquipmentViewCreate.as_view(), name='equipment_create'),
    url(r'^equipment/(?P<pk>\d)/$', views.EquipmentViewDetail.as_view(), name='equipment_detail'),
    url(r'^equipment/(?P<pk>\d)/update/$', views.EquipmentViewUpdate.as_view(), name='equipment_update'),
    url(r'^equipment/(?P<pk>\d)/delete/$', views.EquipmentViewDelete.as_view(), name='equipment_delete'),
    # Service CRUD: same pattern as equipment (same single-digit pk caveat).
    url(r'^service/$', views.ServiceViewList.as_view(), name='service_list'),
    url(r'^service/create/$', views.ServiceViewCreate.as_view(), name='service_create'),
    url(r'^service/(?P<pk>\d)/$', views.ServiceViewDetail.as_view(), name='service_detail'),
    url(r'^service/(?P<pk>\d)/update/$', views.ServiceViewUpdate.as_view(), name='service_update'),
    url(r'^service/(?P<pk>\d)/delete/$', views.ServiceViewDelete.as_view(), name='service_delete'),
) | {
"content_hash": "317bfecb53a13bb03190e4e230ec199c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 105,
"avg_line_length": 62.705882352941174,
"alnum_prop": 0.6838649155722326,
"repo_name": "awwong1/apollo",
"id": "2041493dbf879fdca54dd1edac169c6a0380f5e5",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/assets/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2892"
},
{
"name": "HTML",
"bytes": "79517"
},
{
"name": "JavaScript",
"bytes": "2154"
},
{
"name": "Python",
"bytes": "197209"
},
{
"name": "Shell",
"bytes": "1560"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.