text stringlengths 4 1.02M | meta dict |
|---|---|
from django.db import models
import hashlib, datetime
class Zip(models.Model):
    """A password-protected uploaded zip file.

    The password is never stored in clear text: it is hashed (salted with the
    filename) before every save, and attempts are checked via `is_correct`.
    """
    filename = models.CharField(max_length=20, primary_key=True)
    # 40 characters exactly fits a SHA-1 hex digest.
    password = models.CharField(max_length=40)
    date_created = models.DateTimeField(auto_now_add=True)

    def encrypt(self, text):
        """Return the SHA-1 hex digest of filename (as salt) + text.

        NOTE(review): SHA-1 is a fast hash and weak for password storage;
        a KDF (e.g. PBKDF2) would be stronger — confirm before migrating
        stored digests.
        """
        h = hashlib.sha1()
        # Encode explicitly: hashlib requires bytes-like input on Python 3;
        # on Python 2 this matches the previous implicit behavior for ASCII.
        h.update(self.filename.encode('utf-8'))
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def save(self, *args, **kwargs):
        """Hash the password in place before persisting the row.

        NOTE(review): re-saving an already-saved instance re-hashes the stored
        digest — behavior preserved from the original; confirm callers only
        save fresh instances.
        """
        self.password = self.encrypt(self.password)
        super(Zip, self).save(*args, **kwargs)

    def is_correct(self, passwordAttempt):
        """Return True if the attempt hashes to the stored digest."""
        return self.encrypt(passwordAttempt) == self.password
| {
"content_hash": "9062a45a481b42ed94a8a480f9328e25",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6650943396226415,
"repo_name": "ojarva/file-sharing",
"id": "99927bf53e54a51f51e6681da1e738e142d6b7ae",
"size": "636",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "filesharing/upload/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3497"
},
{
"name": "JavaScript",
"bytes": "77682"
},
{
"name": "Python",
"bytes": "15051"
},
{
"name": "Shell",
"bytes": "528"
}
],
"symlink_target": ""
} |
"""Python net specification.
This module provides a way to write nets directly in Python, using a natural,
functional style. See examples/pycaffe/caffenet.py for an example.
Currently this works as a thin wrapper around the Python protobuf interface,
with layers and parameters automatically generated for the "layers" and
"params" pseudo-modules, which are actually objects using __getattr__ magic
to generate protobuf messages.
Note that when using to_proto or Top.to_proto, names of intermediate blobs will
be automatically generated. To explicitly specify blob names, use the NetSpec
class -- assign to its attributes directly to name layers, and call
NetSpec.to_proto to serialize all assigned layers.
This interface is expected to continue to evolve as Caffe gains new capabilities
for specifying nets. In particular, the automatically generated layer names
are not guaranteed to be forward-compatible.
"""
from collections import OrderedDict, Counter
from .proto import caffe_pb2
from google import protobuf
import six
def param_name_dict():
    """Find out the correspondence between layer names and parameter names."""
    layer = caffe_pb2.LayerParameter()
    # Collect every '*_param' field (underscore case) together with the name
    # of its message type (camel case); the type names embed the layer names.
    # (Not every parameter corresponds to a layer, but that is harmless here.)
    field_suffix = '_param'
    type_suffix = 'Parameter'
    pairs = []
    for field in layer.DESCRIPTOR.fields:
        if not field.name.endswith(field_suffix):
            continue
        type_name = type(getattr(layer, field.name)).__name__
        # Strip the trailing '_param' / 'Parameter' before pairing them up.
        pairs.append((type_name[:-len(type_suffix)],
                      field.name[:-len(field_suffix)]))
    return dict(pairs)
def to_proto(*tops):
    """Generate a NetParameter that contains all layers needed to compute
    all arguments."""
    collected_layers = OrderedDict()
    name_counters = Counter()
    # Serializing each top recursively registers every layer it depends on.
    for top in tops:
        top.fn._to_proto(collected_layers, {}, name_counters)
    net = caffe_pb2.NetParameter()
    net.layer.extend(collected_layers.values())
    return net
def assign_proto(proto, name, val):
    """Assign a Python object to a protobuf message, based on the Python
    type (in recursive fashion). Lists become repeated fields/messages, dicts
    become messages, and other types are assigned directly. For convenience,
    repeated fields whose values are not lists are converted to single-element
    lists; e.g., `my_repeated_int_field=3` is converted to
    `my_repeated_int_field=[3]`.

    Args:
        proto: protobuf message (any object exposing the named attribute).
        name: field name on `proto` to assign.
        val: value to assign (scalar, list, or dict).
    """
    is_repeated_field = hasattr(getattr(proto, name), 'extend')
    if is_repeated_field and not isinstance(val, list):
        val = [val]
    if isinstance(val, list):
        # BUGFIX: guard the empty list — the original indexed val[0]
        # unconditionally, raising IndexError for [].
        if val and isinstance(val[0], dict):
            # A list of dicts maps to a repeated message field.
            for item in val:
                proto_item = getattr(proto, name).add()
                for k, v in six.iteritems(item):
                    assign_proto(proto_item, k, v)
        else:
            # Scalars (or an empty list, a no-op) extend the repeated field.
            getattr(proto, name).extend(val)
    elif isinstance(val, dict):
        # A dict maps onto a singular nested message.
        for k, v in six.iteritems(val):
            assign_proto(getattr(proto, name), k, v)
    else:
        setattr(proto, name, val)
class Top(object):
    """A Top specifies a single output blob (which could be one of several
    produced by a layer.)"""

    def __init__(self, fn, n):
        # The producing Function and this blob's index among its outputs.
        self.fn = fn
        self.n = n

    def to_proto(self):
        """Generate a NetParameter that contains all layers needed to compute
        this top."""
        return to_proto(self)

    def _to_proto(self, layers, names, autonames):
        # Serialization is delegated entirely to the producing Function.
        return self.fn._to_proto(layers, names, autonames)
class Function(object):
    """A Function specifies a layer, its parameters, and its inputs (which
    are Tops from other layers)."""

    def __init__(self, type_name, inputs, params):
        # Validate that every input is a Top, then split the bookkeeping
        # kwargs ('ntop', 'in_place') out of the real layer parameters.
        self.type_name = type_name
        for index, input in enumerate(inputs):
            if not isinstance(input, Top):
                raise TypeError('%s input %d is not a Top (type is %s)' %
                                (type_name, index, type(input)))
        self.inputs = inputs
        self.params = params
        # Number of output blobs this layer produces (default 1).
        self.ntop = self.params.get('ntop', 1)
        # use del to make sure kwargs are not double-processed as layer params
        if 'ntop' in self.params:
            del self.params['ntop']
        # Whether outputs reuse the input blob names (in-place computation).
        self.in_place = self.params.get('in_place', False)
        if 'in_place' in self.params:
            del self.params['in_place']
        self.tops = tuple(Top(self, n) for n in range(self.ntop))

    def _get_name(self, names, autonames):
        # Layer name: reuse the first top's name when the layer has outputs,
        # otherwise autogenerate '<type><count>'. Cached in `names`.
        if self not in names and self.ntop > 0:
            names[self] = self._get_top_name(self.tops[0], names, autonames)
        elif self not in names:
            autonames[self.type_name] += 1
            names[self] = self.type_name + str(autonames[self.type_name])
        return names[self]

    def _get_top_name(self, top, names, autonames):
        # Blob name for `top`: autogenerate '<type><count>' when no explicit
        # name was assigned (e.g. via NetSpec). Cached in `names`.
        if top not in names:
            autonames[top.fn.type_name] += 1
            names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
        return names[top]

    def _to_proto(self, layers, names, autonames):
        # Serialize this layer — and, recursively, every layer it depends
        # on — into `layers` (an OrderedDict keyed by Function).
        if self in layers:
            return
        bottom_names = []
        for inp in self.inputs:
            # Producers are serialized first so their top names exist.
            inp._to_proto(layers, names, autonames)
            bottom_names.append(layers[inp.fn].top[inp.n])
        layer = caffe_pb2.LayerParameter()
        layer.type = self.type_name
        layer.bottom.extend(bottom_names)
        if self.in_place:
            # In-place layers write back onto their input blobs.
            layer.top.extend(layer.bottom)
        else:
            for top in self.tops:
                layer.top.append(self._get_top_name(top, names, autonames))
        layer.name = self._get_name(names, autonames)
        for k, v in six.iteritems(self.params):
            # special case to handle generic *params
            if k.endswith('param'):
                assign_proto(layer, k, v)
            else:
                try:
                    # Route bare kwargs into the layer's typed '<name>_param'
                    # submessage when one exists for this layer type.
                    assign_proto(getattr(layer,
                                         _param_names[self.type_name] + '_param'), k, v)
                except (AttributeError, KeyError):
                    assign_proto(layer, k, v)
        layers[self] = layer
class NetSpec(object):
    """A NetSpec contains a set of Tops (assigned directly as attributes).
    Calling NetSpec.to_proto generates a NetParameter containing all of the
    layers needed to produce all of the assigned Tops, using the assigned
    names."""

    def __init__(self):
        # Bypass our own __setattr__ so 'tops' lands in the real instance
        # dict instead of being recorded as a named blob.
        super(NetSpec, self).__setattr__('tops', OrderedDict())

    def __setattr__(self, name, value):
        self.tops[name] = value

    def __getattr__(self, name):
        return self.tops[name]

    def __setitem__(self, key, value):
        # Item access is just a thin alias for attribute access.
        self.__setattr__(key, value)

    def __getitem__(self, item):
        return self.__getattr__(item)

    def to_proto(self):
        """Serialize every assigned top into a single NetParameter."""
        # Map each assigned Top back to its user-chosen name.
        assigned_names = {top: name for name, top in six.iteritems(self.tops)}
        auto_counter = Counter()
        collected = OrderedDict()
        for _, top in six.iteritems(self.tops):
            top._to_proto(collected, assigned_names, auto_counter)
        net = caffe_pb2.NetParameter()
        net.layer.extend(collected.values())
        return net
class Layers(object):
    """A Layers object is a pseudo-module which generates functions that specify
    layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
    specifying a 3x3 convolution applied to bottom."""

    def __getattr__(self, name):
        # Attribute access manufactures a constructor for the layer type
        # named `name`; calling it builds the Function and unwraps its tops.
        def layer_fn(*args, **kwargs):
            fn = Function(name, args, kwargs)
            if fn.ntop == 0:
                # No outputs: hand back the Function itself.
                return fn
            if fn.ntop == 1:
                # Single output: unwrap it for convenience.
                return fn.tops[0]
            return fn.tops
        return layer_fn
class Parameters(object):
    """A Parameters object is a pseudo-module which generates constants used
    in layer parameters; e.g., Parameters().Pooling.MAX is the value used
    to specify max pooling."""

    def __getattr__(self, name):
        # Defer the caffe_pb2 lookup until the constant itself is accessed,
        # so Parameters().Foo never touches the proto module by itself.
        class Param:
            def __getattr__(self, param_name):
                return getattr(getattr(caffe_pb2, name + 'Parameter'),
                               param_name)
        return Param()
# Module-level singletons: the layer-type -> param-field lookup table and the
# 'layers' / 'params' pseudo-modules that users import from this module.
_param_names = param_name_dict()
layers = Layers()
params = Parameters()
| {
"content_hash": "5ed88980ecedb1b3ef22a880460c2adb",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 88,
"avg_line_length": 35.98695652173913,
"alnum_prop": 0.6254681647940075,
"repo_name": "jimmy-ren/RPN2T",
"id": "20918f9b6bcc556cdc88bc5c8974783b9332af99",
"size": "8277",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "external/_caffe/python/caffe/net_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "70925"
}
],
"symlink_target": ""
} |
"""
Reverse a singly linked list.
"""
__author__ = 'Daniel'
class ListNode:
    """A singly linked list node: holds a value and a pointer to the next node."""
    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    def reverseList(self, head):
        """Reverse a singly linked list iteratively.

        Rewires each node's `next` pointer to its predecessor in one O(n)
        pass with O(1) extra space. (The original allocated a throwaway
        dummy node, temporarily aliased the old head's `next` to it, and
        patched the pointer afterwards; this standard formulation avoids
        both the extra allocation and the fragile intermediate state.)

        :type head: ListNode
        :rtype: ListNode
        """
        prev = None
        node = head
        while node:
            nxt = node.next      # save the rest of the list
            node.next = prev     # reverse this link
            prev = node
            node = nxt
        return prev              # new head (None for an empty list)
| {
"content_hash": "fae20fc502bc760ec3323c88f8bb2592",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 51,
"avg_line_length": 18.806451612903224,
"alnum_prop": 0.5042881646655232,
"repo_name": "ee08b397/LeetCode-4",
"id": "6a7f77744260330dd2ea3c845d1d11c56ecb956d",
"size": "583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "206 Reverse Linked List.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555639"
}
],
"symlink_target": ""
} |
"""Techniques for determining the saliency of input features."""
import logging
from typing import List, Optional, Tuple, Union
from ai_weather_climate.interpretability import utils
import numpy as np
import tensorflow as tf
def get_gradients_against_prediction(
    input_img: Union[tf.Tensor, np.ndarray],
    model: tf.keras.Model,
    mask: Optional[tf.Tensor] = None,
    has_batch_dim: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    """Computes the gradients of the given input against prediction.

    Args:
      input_img: The input tensor. The dimensions of the tensor should match what
        the model expects except for the batch dimension. For eg., if model
        expects a (b, h, w, c) tensor, the input should be (h, w, c).
      model: The trained Keras model.
      mask: The mask to apply to the prediction before obtaining gradients.
      has_batch_dim: Whether the model input and output has a batch dimension.

    Returns:
      Returns gradients with respect to the input of same shape as input,
      together with the (possibly masked) prediction.
    """
    input_img = tf.convert_to_tensor(input_img)
    if has_batch_dim:
        # Add the batch dimension.
        input_img = tf.expand_dims(input_img, axis=0)
    with tf.GradientTape() as gtape:
        # Watch explicitly: the input is a plain tensor, not a tf.Variable.
        gtape.watch(input_img)
        prediction = model(input_img)
        if has_batch_dim:
            # Remove the batch dimension.
            prediction = prediction[0, ...]
        if mask is not None:
            assert mask.shape == prediction.shape, (f'{mask.shape} is not equal to '
                                                    f'{prediction.shape}')
            # Mask inside the tape so gradients flow only through the
            # unmasked entries of the prediction.
            prediction = tf.multiply(mask, prediction)
    grads = gtape.gradient(prediction, input_img)
    if has_batch_dim:
        # Remove the batch dimension.
        grads = np.squeeze(grads, axis=0)
    return grads, prediction
def get_gradients_against_loss(
    input_img: tf.Tensor,
    model: tf.keras.Model,
    label: tf.Tensor,
    mask: Optional[tf.Tensor] = None,
    has_batch_dim: bool = True,
    is_classification: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    """Computes the gradients of the given input against loss.

    Args:
      input_img: The input tensor. The dimensions of the tensor should match what
        the model expects except for the batch dimension. For eg., if model
        expects a (b, h, w, c) tensor, the input should be (h, w, c).
      model: The trained Keras model.
      label: The actual output.
      mask: The mask to apply to the prediction before obtaining gradients.
      has_batch_dim: Whether the model input and output has a batch dimension.
      is_classification: Whether the model being interpreted is a classification
        model.

    Returns:
      Returns gradients of loss with respect to the input of same shape as
      input, together with the computed loss.
    """
    input_img = tf.convert_to_tensor(input_img)
    if has_batch_dim:
        # Add the batch dimension.
        input_img = tf.expand_dims(input_img, axis=0)
    with tf.GradientTape() as gtape:
        gtape.watch(input_img)
        prediction = model(input_img)
        if has_batch_dim:
            # Remove the batch dimension.
            prediction = prediction[0, ...]
        assert prediction.shape == label.shape
        # TODO(shreyaa): Add support for loss functions other than cross entropy.
        if mask is not None:
            assert mask.shape == prediction.shape, (f'{mask.shape} is not equal to '
                                                    f'{prediction.shape}')
            # Mask both sides so masked-out entries contribute no loss.
            prediction = tf.multiply(mask, prediction)
            label = tf.multiply(mask, label)
        if is_classification:
            loss = tf.keras.losses.binary_crossentropy(label, prediction)
        else:
            loss = tf.keras.losses.mean_squared_error(label, prediction)
    grads = gtape.gradient(loss, input_img)
    if has_batch_dim:
        # Remove the batch dimension.
        grads = np.squeeze(grads, axis=0)
    return grads, loss
def sanity_check_integrated_gradients(integrated_gradients: np.ndarray,
                                      predictions: List[np.ndarray]):
    """Sanity checks an integrated gradients computation.

    Ideally, the sum of the integrated gradients equals the difference between
    the predictions at the input and at the baseline; any discrepancy comes
    from the error in approximating the integral.

    Args:
        integrated_gradients: Integrated gradients for an input and predictions.
        predictions: The predicted probability distribution across all classes
            for the various inputs considered in computing integrated
            gradients. It has shape <steps, shape of each predictions> where
            'steps' is the number of integrated gradient steps.

    Raises:
        ValueError: If the baseline prediction equals the input prediction, or
            the integral approximation error exceeds 5 percent.
    """
    expected = np.sum(predictions[-1] - predictions[0])
    actual = np.sum(integrated_gradients)
    if expected == 0.0:
        raise ValueError(
            'FAIL: The prediction at the input is equal to that at the '
            'baseline. Please use a different baseline. Some '
            'suggestions are: random input, mean of the training set.')
    error_pct = 100.0 * abs(expected - actual) / abs(expected)
    if error_pct > 5.0:
        raise ValueError('FAIL: Integral approximation error is too high: {} '
                         'percent (obtained integral: {}, expected integral: {}). '
                         'The acceptable limit is 5 percent. Please try increasing '
                         'the number of integrated gradient steps'.format(
                             error_pct, actual, expected))
    logging.info(
        'Integral approximation error is %s percent which is within the '
        'acceptable threshold of 5 percent', error_pct)
def get_integrated_gradients(
    input_img: Union[tf.Tensor, np.ndarray],
    model: tf.keras.Model,
    label: Optional[Union[tf.Tensor, np.ndarray]] = None,
    mask: Optional[tf.Tensor] = None,
    baseline: Optional[np.ndarray] = None,
    num_steps: int = 50,
    check_sanity: bool = True,
    gradient_base: str = 'prediction',
    is_classification: bool = True,
) -> Optional[tf.Tensor]:
    """Computes Integrated Gradients for a predicted label.

    Original paper https://arxiv.org/pdf/1703.01365.pdf. The rough idea is to
    take a straight line path from the baseline to the input and compute
    gradients at several points along the path. Integrated gradients are
    obtained by taking the cumulative (integral) of these gradients.

    Args:
      input_img: The input tensor. The dimensions of the tensor should match
        what the model expects except for the batch dimension. For example, if
        model expects a (b, h, w, c) tensor, the input should be (h, w, c).
      model: The trained Keras model.
      label: The actual output.
      mask: The mask to apply to the prediction before obtaining gradients.
      baseline: The baseline image to start with for interpolation. If None, an
        image of zeros is used.
      num_steps: Number of interpolation steps between the baseline and the
        input used in the computation of integrated gradients. These steps
        along determine the integral approximation error. By default,
        num_steps is set to 50.
      check_sanity: Whether to perform a sanity check of the integrated
        gradient values.
      gradient_base: Specifies what the gradient of inputs should be computed
        against ('loss' uses the label; anything else uses the prediction).
      is_classification: Whether the model being interpreted is a
        classification model.

    Returns:
      Returns integrated gradients with respect to the input of same shape as
      input, or None when `check_sanity` is set and the check fails.
    """
    # If baseline is not provided, start with a black image having same size as
    # the input image.
    if baseline is None:
        baseline = np.zeros(input_img.shape)
    baseline = baseline.astype(np.float32)
    input_img = tf.cast(input_img, dtype=tf.float32)
    # 1. Do the interpolation for given number of steps (num_steps + 1 points
    # on the straight line from baseline to input, endpoints included).
    interpolated_image = np.linspace(baseline, input_img, num_steps + 1)
    # 2. Get the gradients at each interpolation point.
    grads = []
    outputs = []
    for img in interpolated_image:
        if gradient_base == 'loss' and label is not None:
            grad, output = get_gradients_against_loss(
                img,
                model,
                label=label,
                mask=mask,
                is_classification=is_classification)
        else:
            grad, output = get_gradients_against_prediction(img, model, mask=mask)
        grads.append(grad)
        outputs.append(output)
    grads = tf.convert_to_tensor(grads, dtype=tf.float32)
    # 3. Approximate the integral using the trapezoidal rule (average of
    # adjacent gradient samples).
    grads = (grads[:-1] + grads[1:]) / 2.0
    avg_grads = tf.reduce_mean(grads, axis=0)
    # 4. Calculate integrated gradients and return.
    final_ig = tf.multiply((input_img - baseline), avg_grads)
    if check_sanity:
        try:
            sanity_check_integrated_gradients(final_ig, outputs)
        except ValueError:
            # Sanity check failed: signal the caller to retry (e.g. with more
            # steps) by returning None.
            return None
    return final_ig
def get_integrated_gradients_with_retry(
    input_img: Union[tf.Tensor, np.ndarray],
    model: tf.keras.Model,
    label: Optional[Union[tf.Tensor, np.ndarray]] = None,
    mask: Optional[tf.Tensor] = None,
    baseline: Optional[np.ndarray] = None,
    retry_times: int = 3,
    gradient_base: str = 'prediction',
    is_classification: bool = True,
    num_steps: int = 50,
) -> Optional[tf.Tensor]:
    """Returns sanity checked IG by doubling the number of steps in each retry."""
    steps = num_steps
    attempts_left = retry_times
    while attempts_left > 0:
        attempts_left -= 1
        result = get_integrated_gradients(
            input_img=input_img,
            model=model,
            label=label,
            mask=mask,
            baseline=baseline,
            num_steps=steps,
            gradient_base=gradient_base,
            is_classification=is_classification)
        if result is not None:
            return result
        # Sanity check failed: double the interpolation steps and retry.
        steps *= 2
    # Every attempt failed its sanity check.
    return None
def random_baseline_integrated_gradients(input_img: Union[tf.Tensor,
                                                          np.ndarray],
                                         model: tf.keras.Model,
                                         mask: Optional[tf.Tensor] = None,
                                         num_steps: int = 20,
                                         num_runs: int = 5) -> tf.Tensor:
    """Computes Integrated Gradients using a random baseline.

    Args:
      input_img: The input tensor. The dimensions of the tensor should match
        what the model expects except for the batch dimension. For eg., if
        model expects a (b, h, w, c) tensor, the input should be (h, w, c).
      model: The trained Keras model.
      mask: The mask to apply to the prediction before obtaining gradients.
      num_steps: Number of interpolation steps between the baseline and the
        input used in the computation of integrated gradients. These steps
        along determine the integral approximation error. By default,
        num_steps is set to 20.
      num_runs: Number of runs to do this over to pick different baselines.

    Returns:
      Averaged integrated gradients for `num_runs` baseline images.
    """
    # 1. Get the integrated gradients for all the baselines.
    integrated_grads = []
    for _ in range(num_runs):
        # Each run draws a fresh uniform-random baseline spanning the input's
        # observed value range.
        baseline = np.random.uniform(
            low=np.amin(input_img), high=np.amax(input_img), size=input_img.shape)
        igrads = get_integrated_gradients(
            input_img=input_img,
            model=model,
            mask=mask,
            baseline=baseline,
            num_steps=num_steps,
        )
        # NOTE(review): get_integrated_gradients may return None when its
        # sanity check fails; a None here would make convert_to_tensor below
        # fail — confirm whether check_sanity should be disabled for this use.
        integrated_grads.append(igrads)
    # 2. Return the average integrated gradients for the image over all the runs.
    integrated_grads = tf.convert_to_tensor(integrated_grads, dtype=tf.float32)
    return tf.reduce_mean(integrated_grads, axis=0)
def get_blur_integrated_gradients(
    input_img: Union[tf.Tensor, np.ndarray],
    model: tf.keras.Model,
    mask: Optional[tf.Tensor] = None,
    grad_step: float = 0.01,
    max_sigma: float = 50,
    num_steps: int = 50,
    check_sanity: bool = True) -> Optional[np.ndarray]:
    """Computes Integrated Gradients for a predicted label by blurring the inputs.

    Original paper:
    https://openaccess.thecvf.com/content_CVPR_2020/papers/Xu_Attribution_in_Scale_and_Space_CVPR_2020_paper.pdf.
    One of the main advantages of this technique over the standard integrated
    gradients is that it does not require a baseline.

    Args:
      input_img: The input tensor. The dimensions of the tensor should match
        what the model expects except for the batch dimension. For example, if
        model expects a (b, h, w, c) tensor, the input should be (h, w, c).
      model: The trained Keras model.
      mask: The mask to apply to the prediction before obtaining gradients.
      grad_step: Gaussian gradient step size.
      max_sigma: The maximum value of sigma to be used for blurring.
      num_steps: Number of steps of blurring the input image. These steps along
        determine the integral approximation error. By default, num_steps is
        set to 50.
      check_sanity: Whether to perform a sanity check of the integrated
        gradient values.

    Returns:
      Returns integrated gradients with respect to the input of same shape as
      input, or None when `check_sanity` is set and the check fails.
    """
    # Blur scales to step through: from no blur (sigma 0) up to max_sigma.
    sigmas = np.linspace(0, max_sigma, num_steps + 1)
    step_vector_diff = np.diff(sigmas)
    total_gradients = np.zeros_like(input_img)
    preds = []
    for i in range(num_steps):
        blurred_img = utils.gaussian_blur(input_img, sigmas[i])
        # Finite-difference derivative of the blurred image w.r.t. sigma.
        gaussian_gradient = (utils.gaussian_blur(
            input_img, sigmas[i] + grad_step) - blurred_img) / grad_step
        grad, pred = get_gradients_against_prediction(blurred_img, model, mask=mask)
        # Accumulate the path integral along the blur (scale) dimension.
        total_gradients += step_vector_diff[i] * np.multiply(
            gaussian_gradient, grad)
        preds.append(pred)
    if check_sanity:
        try:
            sanity_check_integrated_gradients(total_gradients, preds)
        except ValueError:
            return None
    # NOTE(review): the accumulated integral is negated before returning —
    # presumably to orient attributions; confirm against the Blur IG paper.
    return total_gradients * -1.0
def get_blur_integrated_gradients_with_retry(
    input_img: Union[tf.Tensor, np.ndarray],
    model: tf.keras.Model,
    mask: Optional[tf.Tensor] = None,
    grad_step: float = 0.01,
    max_sigma: float = 50,
    retry_times: int = 3) -> Optional[np.ndarray]:
    """Returns sanity checked Blur IG by doubling steps in every retry."""
    steps = 50
    for _attempt in range(retry_times):
        result = get_blur_integrated_gradients(
            input_img=input_img,
            model=model,
            mask=mask,
            grad_step=grad_step,
            max_sigma=max_sigma,
            num_steps=steps)
        if result is not None:
            return result
        # Sanity check failed: double the blur steps and try again.
        steps *= 2
    # Every attempt failed its sanity check.
    return None
| {
"content_hash": "db3146b5e74a291ef2d13c4a260ef5c1",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 111,
"avg_line_length": 37.43271767810026,
"alnum_prop": 0.670261507013463,
"repo_name": "google/ai-weather-climate",
"id": "4bbeb365adcc3eb671a71a7d7c03d747f24aaa3e",
"size": "14187",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "interpretability/saliency_maps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "36106"
},
{
"name": "Python",
"bytes": "73412"
}
],
"symlink_target": ""
} |
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
from datetime import timedelta
import sys
from time import sleep
from time import time
from androlyze import Constants
from androlyze.util import Util
from androlyze.util.StopThread import StopThread
from Queue import Empty
class AnalysisStatsView(StopThread):
    ''' Updates the progress on the command line '''

    def __init__(self, cnt_done, cnt_complete, analyzed_apks):
        '''
        Parameters
        ----------
        cnt_done : Value<int>
            Number of yet finished jobs.
        cnt_complete : int
            Complete count of jobs.
        analyzed_apks : Queue<FastAPK>
            Yet analyzed apks.
        '''
        super(AnalysisStatsView, self).__init__()
        self.start_time = time()
        self.cnt_done = cnt_done
        self.cnt_complete = cnt_complete
        self.cnt_analyzed_apks = analyzed_apks
        # Cache of the most recently analyzed apk description, shown until a
        # newer one arrives on the queue.
        self.last_analyzed_apk = "N/A"
        # Remember what was printed last so it can be blanked out.
        self.last_printed_str = ""

    def get_latest_analyzed_apk_name(self):
        ''' Get the latest analyzed apk name (cached when the queue is empty). '''
        try:
            fastapk = self.cnt_analyzed_apks.get_nowait()
            if fastapk:
                self.last_analyzed_apk = fastapk.short_description()
            else:
                self.last_analyzed_apk = "N/A"
        except Empty:
            # Nothing new since the last poll; keep the cached name.
            pass
        # BUGFIX: the original returned from a `finally` block, which silently
        # swallowed *any* exception raised in the `try` body (not just Empty).
        # Returning here preserves the Empty handling while letting real
        # errors propagate.
        return self.last_analyzed_apk

    def run(self):
        ''' Print progress until terminate `event` set '''
        # Refresh rate is configured in milliseconds; sleep() wants seconds.
        refresh_rate = Constants.PROGRESS_REFRESH_RATE / 1000.0
        while not self.shall_terminate():
            sleep(refresh_rate)
            self.print_progess()
        # print final progress before exiting
        self.print_progess()
        sys.stderr.write("\n")

    def print_progess(self):
        ''' Show the progress on run '''
        progress_str = Util.format_progress(self.cnt_done.value, self.cnt_complete)
        time_elapsed = timedelta(seconds=round(time() - self.start_time))
        progress_str = '=> [%s | %s | %s]' % (progress_str, time_elapsed, self.get_latest_analyzed_apk_name())
        # Blank out the previous line before writing the new progress string.
        sys.stdout.write("\r" + " " * len(self.last_printed_str))
        Util.print_dyn_progress(progress_str)
        self.last_printed_str = progress_str
| {
"content_hash": "bf917b48a969caa7d52bf9b6883c44be",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 110,
"avg_line_length": 32.205479452054796,
"alnum_prop": 0.584857507443641,
"repo_name": "nachtmaar/androlyze",
"id": "80445f8d28ee7afa837161ea1351c47a8683670d",
"size": "2371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "androlyze/analyze/parallel/AnalysisStatsView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509940"
},
{
"name": "Shell",
"bytes": "11367"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ytDownloader import ytDownloader
from threading import Thread
import gc
from concurrent.futures import ThreadPoolExecutor
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
def callback(d):
    """Completion hook invoked by ytDownloader when a video finishes.

    Args:
        d: status dict; 'filename' holds the finished file's name.
    """
    # Function-call form prints the same line as the original Python 2
    # `print` statement but also parses on Python 3 (where the statement
    # form is a syntax error).
    print('CALLBACK: video finished {0}'.format(d['filename']))
# Smoke test: submit one download to a thread pool; completion is reported
# through `callback`.
test = ['https://www.youtube.com/watch?v=nlg_tQ3aWxE']
ytd = ytDownloader(callback)
# NOTE(review): 24 workers for a single URL — presumably sized for larger
# batches; confirm intended pool size.
executor = ThreadPoolExecutor(max_workers=24)
executor.map(ytd.download, test, timeout=None)
#ytd.download('https://www.youtube.com/watch?v=nlg_tQ3aWxE')
#threads = [Thread(target=ytd.download, args=[vid]) for vid in videos]
#for thread in threads:
#    thread.start()
#for thread in threads:
#    thread.join()
| {
"content_hash": "a5d67492889985a243bbe8840ee0aa89",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 70,
"avg_line_length": 20.147058823529413,
"alnum_prop": 0.743065693430657,
"repo_name": "lodemo/CATANA",
"id": "77f64934faca0da3b72031f13aaedb4caa364cf9",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/face_recognition/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4068"
},
{
"name": "HTML",
"bytes": "755393"
},
{
"name": "JavaScript",
"bytes": "1451186"
},
{
"name": "Jupyter Notebook",
"bytes": "12442842"
},
{
"name": "MATLAB",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "5006823"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
} |
"""
BLEU scoring of generated translations against reference translations.
"""
import argparse
import os
import sys
from fairseq import bleu
from fairseq.data import dictionary
def get_parser():
    """Build the argument parser for the BLEU scoring CLI.

    Returns:
        argparse.ArgumentParser configured with all scoring options.
    """
    parser = argparse.ArgumentParser(
        description='Command-line script for BLEU scoring.')
    parser.add_argument('-s', '--sys', default='-', help='system output')
    parser.add_argument('-r', '--ref', required=True, help='references')
    parser.add_argument(
        '-o', '--order', default=4, metavar='N', type=int,
        help='consider ngrams up to this order')
    # The remaining options are plain boolean flags.
    for flag, help_text in (
            ('--ignore-case', 'case-insensitive scoring'),
            ('--sacrebleu', 'score with sacrebleu'),
            ('--sentence-bleu',
             'report sentence-level BLEUs (i.e., with +1 smoothing)')):
        parser.add_argument(flag, action='store_true', help=help_text)
    return parser
def cli_main():
    """Entry point: parse arguments, then BLEU-score system output against
    the reference file (corpus-level, per-sentence, or via sacrebleu)."""
    parser = get_parser()
    args = parser.parse_args()
    print(args)

    assert args.sys == '-' or os.path.exists(args.sys), \
        "System output file {} does not exist".format(args.sys)
    assert os.path.exists(args.ref), \
        "Reference file {} does not exist".format(args.ref)

    # Renamed from `dict` to avoid shadowing the builtin.
    vocab = dictionary.Dictionary()

    def readlines(fd):
        # Optionally lower-case lines for case-insensitive scoring.
        for line in fd.readlines():
            if args.ignore_case:
                yield line.lower()
            else:
                yield line

    if args.sacrebleu:
        import sacrebleu

        def score(fdsys):
            # Delegate corpus BLEU entirely to sacrebleu.
            with open(args.ref) as fdref:
                print(sacrebleu.corpus_bleu(fdsys, [fdref]))
    elif args.sentence_bleu:
        def score(fdsys):
            # Report a (+1 smoothed) BLEU for every sentence pair.
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(vocab.pad(), vocab.eos(), vocab.unk())
                for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
                    scorer.reset(one_init=True)
                    sys_tok = vocab.encode_line(sys_tok)
                    ref_tok = vocab.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                    print(i, scorer.result_string(args.order))
    else:
        def score(fdsys):
            # Single corpus-level BLEU over all sentence pairs.
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(vocab.pad(), vocab.eos(), vocab.unk())
                for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
                    sys_tok = vocab.encode_line(sys_tok)
                    ref_tok = vocab.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                print(scorer.result_string(args.order))

    if args.sys == '-':
        score(sys.stdin)
    else:
        with open(args.sys, 'r') as f:
            score(f)


if __name__ == '__main__':
    cli_main()
| {
"content_hash": "7c3089b1b7a8ddc77437785465be5688",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 96,
"avg_line_length": 34.433734939759034,
"alnum_prop": 0.5608817354793562,
"repo_name": "hfp/libxsmm",
"id": "f7c3dc42b98f1039dd4062fcfda2a42ddfe762c4",
"size": "3058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/deeplearning/sparse_training/fairseq/fairseq_cli/score.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3115"
},
{
"name": "C",
"bytes": "8335143"
},
{
"name": "C++",
"bytes": "84416"
},
{
"name": "CSS",
"bytes": "242"
},
{
"name": "Fortran",
"bytes": "102021"
},
{
"name": "HTML",
"bytes": "390"
},
{
"name": "JavaScript",
"bytes": "1062"
},
{
"name": "Makefile",
"bytes": "158870"
},
{
"name": "Python",
"bytes": "36612"
},
{
"name": "Shell",
"bytes": "84205"
},
{
"name": "Starlark",
"bytes": "882"
}
],
"symlink_target": ""
} |
import sys
import imp
imp.reload(sys)
try:
    sys.setdefaultencoding('UTF8')
except Exception as E:
    # Python 3 has no setdefaultencoding; this is best-effort on Python 2.
    pass
import testValue
from popbill import ClosedownService, PopbillException

# Closedown service handle configured from the shared test settings module.
closedownService = ClosedownService(testValue.LinkID, testValue.SecretKey)
closedownService.IsTest = testValue.IsTest
closedownService.IPRestrictOnOff = testValue.IPRestrictOnOff
closedownService.UseStaticIP = testValue.UseStaticIP
closedownService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Checks the linked member's company (corporate) information.
- https://docs.popbill.com/closedown/python/api#GetCorpInfo
'''
# NOTE(review): several multi-byte (Korean) string literals and comments below
# appear garbled by text extraction (characters split across lines); restore
# the exact literals from the upstream repository before running.
try:
    print("=" * 15 + " ํ์ฌ์ ๋ณด ํ์ธ " + "=" * 15)
    # Popbill member's business registration number
    ์๋ฒํธ
    CorpNum = testValue.testCorpNum
    response = closedownService.getCorpInfo(CorpNum)
    tmp = "ceoname(๋ํ์ ์ฑ๋ช
) : " + response.ceoname + "\n"
    tmp += "corpName(์ํธ) : " + response.corpName + "\n"
    tmp += "addr(์ฃผ์) : " + response.addr + "\n"
    tmp += "bizType(์
ํ) : " + response.bizType + "\n"
    tmp += "bizClass(์ข
๋ชฉ) : " + response.bizClass + "\n"
    print(tmp)
except PopbillException as PE:
    print("Exception Occur : [%d] %s" % (PE.code, PE.message))
| {
"content_hash": "544fea77c89fa616778d5478786ffa53",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 26.975609756097562,
"alnum_prop": 0.6971066907775768,
"repo_name": "linkhub-sdk/popbill.closedown.example.py",
"id": "096a7b0faa1f49738a85b205bd07b53730902db2",
"size": "1273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getCorpInfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28593"
}
],
"symlink_target": ""
} |
from rosbridge_library.internal.cbor_conversion import extract_cbor_values
from rosbridge_library.internal.message_conversion import (
extract_values as extract_json_values,
)
try:
from cbor import dumps as encode_cbor
except ImportError:
from rosbridge_library.util.cbor import dumps as encode_cbor
class OutgoingMessage:
    """A message wrapper for caching encoding operations."""

    def __init__(self, message):
        self._message = message
        # Lazily-computed caches; each one is filled at most once.
        self._json_values = None
        self._cbor_values = None
        self._cbor_msg = None
        self._cbor_raw_msg = None

    @property
    def message(self):
        """The wrapped message object."""
        return self._message

    def get_json_values(self):
        """Extract (and cache) the message values for JSON serialization."""
        cached = self._json_values
        if cached is None:
            cached = self._json_values = extract_json_values(self._message)
        return cached

    def get_cbor_values(self):
        """Extract (and cache) the message values for CBOR serialization."""
        cached = self._cbor_values
        if cached is None:
            cached = self._cbor_values = extract_cbor_values(self._message)
        return cached

    def get_cbor(self, outgoing_msg):
        """Encode (and cache) `outgoing_msg` as CBOR with this message's
        extracted values inlined under the 'msg' key."""
        if self._cbor_msg is None:
            outgoing_msg["msg"] = self.get_cbor_values()
            self._cbor_msg = encode_cbor(outgoing_msg)
        return self._cbor_msg

    def get_cbor_raw(self, outgoing_msg):
        """Encode (and cache) `outgoing_msg` as CBOR without extracting
        this message's values."""
        if self._cbor_raw_msg is None:
            self._cbor_raw_msg = encode_cbor(outgoing_msg)
        return self._cbor_raw_msg
| {
"content_hash": "71169c8aad996743da5529d4eeb1636b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 29.680851063829788,
"alnum_prop": 0.6408602150537634,
"repo_name": "RobotWebTools/rosbridge_suite",
"id": "c9d7dbc5db0ddabaf143f77262b8900047224cf5",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/ros2",
"path": "rosbridge_library/src/rosbridge_library/internal/outgoing_message.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "4797"
},
{
"name": "Python",
"bytes": "394321"
}
],
"symlink_target": ""
} |
"""Find and register unittests.
See https://docs.python.org/3/library/unittest.html#load-tests-protocol
for details or
https://github.com/python/cpython/blob/main/Lib/unittest/test/__main__.py
for sample implementation.
"""
import os
def load_tests(loader, standard_tests, unused_pattern):
    """Implement the unittest load-tests protocol for this package.

    Test modules here are named ``*_test.py`` rather than the default
    ``test*.py``, so discovery is re-run with a custom pattern and the
    discovered suites are folded into *standard_tests*.
    """
    package_dir = os.path.dirname(__file__)
    discovered = loader.discover(start_dir=package_dir, pattern="*_test.py")
    standard_tests.addTests(discovered)
    return standard_tests
| {
"content_hash": "8c49c67a5facf104e37c1f812b0985d4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 32.35294117647059,
"alnum_prop": 0.7381818181818182,
"repo_name": "google-research/language",
"id": "3d2e70cf135c2f79058d8bf10ca487e7ab3abd5c",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/mentionmemory/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fileinput
import re
import unittest2 as unittest
import socket
import os, string, sys
import tinctest
from tinctest.lib import local_path, Gpdiff, run_shell_command
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.filerep_util import Filerepe2e_Util
from time import sleep
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.gpConfig import GpConfig
from mpp.lib.config import GPDBConfig
import subprocess
class FilerepTestCase(MPPTestCase):
    """Helper TestCase for GPDB filerep (mirror replication) end-to-end suites.

    Bundles the cluster-management utilities the filerep tests rely on:
    fault injection, gpstart/gpstop, gprecoverseg, segment-state counting
    via gp_segment_configuration, and gpstate validation across the mirror
    transition phases (sync1, ck_sync1, ct, resync, sync2).
    """
    def __init__(self, methodName):
        # Instantiate the shared helper objects once per test method.
        self.pgport = os.environ.get('PGPORT')
        self.util = Filerepe2e_Util()
        self.gpconfig = GpConfig()
        self.config = GPDBConfig()
        self.gpr = GpRecover(self.config)
        self.dbstate = DbStateClass('run_validation',self.config)
        self.gpstart = GpStart()
        self.gpstop = GpStop()
        super(FilerepTestCase,self).__init__(methodName)
    def handle_ext_cases(self,file):
        """
        Rewrite gpfdist URLs in a writable-external-table sql file so they
        point at this machine.
        @file: wet sql file to replace with specific machine env.
        """
        host = str(socket.gethostbyname(socket.gethostname())) #Must be an IP
        querystring = "gpfdist://"+host+":8088"
        if os.path.isfile(file):
            # inplace=1 rewrites the file in place, one line at a time.
            for line in fileinput.FileInput(file,inplace=1):
                line = re.sub('gpfdist.+8088',querystring,line)
                print str(re.sub('\n','',line))
    def handle_hybrid_part_cases(self, file):
        """
        Point the hybrid-partition load path at the local copy of the data.
        @file: hybrid sql file to replace with specific machine env
        """
        querystring = "FROM '"+local_path('hybrid_part.data')+"'"
        if os.path.isfile(file):
            for line in fileinput.FileInput(file,inplace=1):
                line = re.sub('FROM\s\'.+hybrid_part.data\'',querystring,line)
                print str(re.sub('\n','',line))
    def preprocess(self):
        """
        Replace the hard-coded information from sql files with correct hostname and ip address,etc
        """
        list_workload_dir = ['set_sync1','sync1','set_ck_sync1','ck_sync1',
                            'set_ct','ct','set_resync','resync','set_sync2','sync2']
        for dir in list_workload_dir:
            # Both the sql inputs and the expected answer files are rewritten.
            sql_path = os.path.join(local_path(dir),'sql')
            ans_path = os.path.join(local_path(dir),'expected')
            for file in os.listdir(sql_path):
                if (file.find('wet_ret')>=0):
                    self.handle_ext_cases(os.path.join(sql_path,file))
                if (file.find('hybrid_part')>=0):
                    self.handle_hybrid_part_cases(os.path.join(sql_path,file))
            for file in os.listdir(ans_path):
                if (file.find('wet_ret')>=0):
                    self.handle_ext_cases(os.path.join(ans_path,file))
                if (file.find('hybrid_part')>=0):
                    self.handle_hybrid_part_cases(os.path.join(ans_path,file))
    def clean_data(self):
        """
        Clean the data by removing the external table, otherwise, more data will be appended to the
        same external table from running multiple sql files.
        """
        test = local_path("")
        test = str(test) +"data/*.*"
        cmd = 'rm -rfv '+test
        run_shell_command(cmd)
    def anydownsegments(self):
        """
        checks if any segments are down
        Returns True when no segment is marked down, False otherwise.
        """
        tinctest.logger.info("Checking if any segments are down")
        num_segments_down = self.count_of_nodes_down()
        if int(num_segments_down) == 0:
            return True
        else:
            return False
    def stop_start_validate(self, stopValidate=True):
        """
        Do gpstop -i, gpstart and see if all segments come back up fine
        """
        tinctest.logger.info("Performing stop start validate")
        tinctest.logger.info("Shutting down the cluster")
        ok = self.gpstop.run_gpstop_cmd(immediate = 'i', validate=stopValidate)
        if not ok and stopValidate:
            raise Exception('Problem while shutting down the cluster')
        tinctest.logger.info("Successfully shutdown the cluster.")
        tinctest.logger.info("Restarting the cluster.")
        ok = self.gpstart.run_gpstart_cmd()
        if not ok:
            raise Exception('Failed to bring the cluster back up')
        tinctest.logger.info("Successfully restarted the cluster.")
        if not self.anydownsegments():
            raise Exception("segments were marked down")
        else:
            return (True, "All segments are up")
    def method_reset_fault_injection(self):
        """
        Resets fault injection
        Return: (True, [result]) if OK, or (False, [result]) otherwise
        """
        tinctest.logger.info("Resetting fault injection")
        (ok1,out1) = self.util.inject_fault(f='filerep_resync', m = 'async', y = 'reset', r = 'primary', H ='ALL')
        if not ok1:
            raise Exception("Fault injection failed")
        tinctest.logger.info("Done Injecting Fault to reset resync")
        return (True, str(out1))
    def method_resume_filerep_resync(self):
        """
        Resumes the process of resync
        """
        tinctest.logger.info("Resuming Resync")
        (ok, out) = self.util.inject_fault(f='filerep_resync', m='async',y='resume', r='primary', H='ALL')
        if not ok:
            raise Exception("Fault injection failed")
        tinctest.logger.info("Done resuming resync")
        return (ok, out)
    def run_method_suspendresync(self):
        """
        Stops the cluster from going to resync
        """
        tinctest.logger.info("Suspending resync")
        (ok,out) = self.util.inject_fault(f='filerep_resync', m='async' , y='suspend', r ='primary', H='ALL')
        tinctest.logger.info('output from suspend resync %s'%out)
        if not ok:
            raise Exception("Fault injection failed")
        tinctest.logger.info("Done Injecting Fault to suspend resync")
        return (ok, out)
    def count_of_masters(self):
        """
        Gives count of number of nodes in the cluster that are master
        Return: count of number of nodes in the cluster that are master
        """
        tinctest.logger.info("Count the number of masters")
        cmd = "select count(*) from gp_segment_configuration where content = -1"
        (out) = PSQL.run_sql_command(cmd)
        # Line 4 (index 3) of psql's default output carries the scalar result.
        num_master = out.split('\n')[3].strip()
        return num_master
    def count_of_nodes(self):
        """
        Gives count of number of nodes in the cluster
        Return: count of number of nodes in the cluster
        """
        tinctest.logger.info("Counting number of nodes")
        cmd = "select count(*) from gp_segment_configuration"
        (num_cl) = PSQL.run_sql_command(cmd)
        total_num_rows = num_cl.split('\n')[3].strip()
        return total_num_rows
    def count_of_nodes_in_ct(self):
        """
        Gives count of number of nodes in change tracking
        Return: count of number of nodes in change tracking
        """
        tinctest.logger.info("Counting number of nodes in ct")
        sqlcmd = "select count(*) from gp_segment_configuration where mode = 'c'"
        (num_cl) = PSQL.run_sql_command(sqlcmd)
        num_cl = num_cl.split('\n')[3].strip()
        return num_cl
    def count_of_nodes_down(self):
        """
        Gives count of number of nodes marked as down
        Return: count of number of nodes marked as down
        """
        tinctest.logger.info("Counting the number of nodes down")
        sqlcmd = "select count(*) from gp_segment_configuration where status = 'd'"
        (num_down) = PSQL.run_sql_command(sqlcmd)
        num_down = num_down.split('\n')[3].strip()
        return num_down
    def count_of_nodes_sync(self):
        """
        Gives count of number of nodes in sync
        Return: count of number of nodes in sync
        """
        tinctest.logger.info("Counting the number of nodes in sync")
        sqlcmd = "select count(*) from gp_segment_configuration where mode = 's'"
        (num_sync) = PSQL.run_sql_command(sqlcmd)
        num_sync = num_sync.split('\n')[3].strip()
        return num_sync
    def count_of_nodes_not_sync(self):
        """
        Gives count of number of nodes not in sync
        Return: count of number of nodes not in sync
        """
        tinctest.logger.info("Counting number of nodes not in sync")
        sqlcmd = "select count(*) from gp_segment_configuration where mode <> 's'"
        (num_sync) = PSQL.run_sql_command(sqlcmd)
        num_sync = num_sync.split('\n')[3].strip()
        return num_sync
    def inject_fault_on_first_primary(self):
        """
        Inject two faults on the first primary (dbid 2): an infinite loop on
        immediate-shutdown and on the is-operation-completed check.
        @product_version gpdb:[4.3.3.0-], gpdb:[4.2.8.1-4.2]
        """
        tinctest.logger.info("\n Injecting faults on first primary")
        (ok,out) = self.util.inject_fault(f='filerep_immediate_shutdown_request', m='async' , y='infinite_loop', r ='primary', seg_id=2, sleeptime=300)
        if not ok:
            raise Exception("Fault filerep_immediate_shutdown_request injection failed")
        (ok,out) = self.util.inject_fault(f='fileRep_is_operation_completed', m='async' , y='infinite_loop', r ='primary', seg_id=2)
        if not ok:
            raise Exception("Fault fileRep_is_operation_completed injection failed")
        tinctest.logger.info("\n Done Injecting Fault")
    def inject_fault_on_first_mirror(self):
        """
        Once the primary-side fault has triggered, panic the first mirror
        (content 0, role 'm') through the filerep_consumer fault.
        @product_version gpdb:[4.3.3.0-], gpdb:[4.2.8.1-4.2]
        """
        sqlcmd = "select dbid from gp_segment_configuration where content=0 and role='m'"
        (first_mirror_dbid) = PSQL.run_sql_command(sqlcmd)
        first_mirror_dbid = first_mirror_dbid.split('\n')[3].strip()
        tinctest.logger.info("\n Injecting faults on first mirror")
        # Wait (up to max_cycle polls) for the primary-side fault to trigger first.
        flag = self.util.check_fault_status(fault_name='fileRep_is_operation_completed', status='triggered', max_cycle=100);
        if not flag:
            raise Exception("Fault fileRep_is_operation_completed didn't trigger")
        (ok,out) = self.util.inject_fault(f='filerep_consumer', m='async' , y='panic', r ='mirror', seg_id=first_mirror_dbid)
        if not ok:
            raise Exception("Fault filerep_consumer injection failed")
        tinctest.logger.info("\n Done Injecting Fault")
    def setupGpfdist(self, port, path):
        """Kill any running gpfdist, then start one serving *path* on *port*."""
        gpfdist = Gpfdist(port , self.hostIP())
        gpfdist.killGpfdist()
        gpfdist.startGpfdist(' -t 30 -m 1048576 -d '+path)
        return True
    def cleanupGpfdist(self, port,path):
        """Kill the gpfdist daemon (the path argument is unused here)."""
        gpfdist = Gpfdist(port , self.hostIP())
        gpfdist.killGpfdist()
        return True
    def hostIP(self):
        """Return 'mdw' when the hostname contains it, else this host's IP."""
        ok = run_shell_command('which gpfdist')
        if not ok:
            # NOTE(review): GPtestError is not defined or imported in this
            # module -- reaching this branch would raise a NameError instead.
            raise GPtestError("Error:'which gpfdist' command failed.")
        hostname = socket.gethostname()
        if hostname.find('mdw') > 0 :
            host = 'mdw'
        else:
            host = str(socket.gethostbyname(socket.gethostname())) #Must be an IP
        tinctest.logger.info('current host is %s'%host)
        return host
    def method_setup(self):
        """Create the test filespaces, raise max_resource_queues and restart."""
        tinctest.logger.info("Performing setup tasks")
        gpfs=Gpfilespace()
        gpfs.create_filespace('filerep_fs_a')
        gpfs.create_filespace('filerep_fs_b')
        gpfs.create_filespace('filerep_fs_c')
        gpfs.create_filespace('filerep_fs_z')
        gpfs.create_filespace('sync1_fs_1')
        # Set max_resource_queues to 100
        cmd = 'gpconfig -c max_resource_queues -v 100 '
        ok = run_shell_command(cmd)
        if not ok:
            raise Exception('Failure during setting the max_resource_queues value to 100 using gpconfig tool')
        #Restart the cluster
        self.gpstop.run_gpstop_cmd(immediate = 'i')
        ok = self.gpstart.run_gpstart_cmd()
        if not ok:
            raise Exception('Failure during restarting the cluster')
        return True
    def get_ext_table_query_from_gpstate(self):
        """Extract the sample external-table DDL that gpstate can emit.

        Everything from the 'DROP EXTERNAL TABLE ...' line to the end of the
        gpstate output is collected and returned as one SQL string.
        """
        outfile = local_path("gpstate_tmp")
        ok = run_shell_command("gpstate --printSampleExternalTableSql >"+ outfile)
        querystring = ""
        flag = 'false'
        out = open(outfile, 'r').readlines()
        for line in out:
            line.strip()
            if (line.find('DROP EXTERNAL TABLE IF EXISTS gpstate_segment_status')>=0):
                flag = 'true'
            if flag == 'true':
                querystring = querystring + line
        return querystring ############RUN QYUERY
    def check_gpstate(self, type, phase):
        """
        Perform gpstate for each different transition state
        @type: failover type
        @phase: transition stage, can be sync1, ck_sync1, ct, resync, sync2
        """
        # Each phase compares counts from the gpstate_segment_status external
        # table against gp_segment_configuration and raises on mismatch.
        if phase == 'sync1':
            state_num = self.query_select_count("select count(*) from gpstate_segment_status where role = preferred_role and mirror_status ='Synchronized' and status_in_config='Up' and instance_status='Up'")
            sync1_num = self.query_select_count("select count(*) from gp_segment_configuration where content <> -1")
            if int(sync1_num) <> int(state_num):
                raise Exception("gpstate in Sync state failed")
            tinctest.logger.info("Done Running gpstate in %s phase " %(phase))
        elif phase == 'ct':
            p_num = self.query_select_count("select count(*) from gpstate_segment_status where role = preferred_role and mirror_status ='Change Tracking'  and role = 'Primary' and status_in_config='Up' and instance_status='Up'")
            m_num = self.query_select_count("select count(*) from gpstate_segment_status where role = preferred_role and mirror_status ='Out of Sync'  and role = 'Mirror' and status_in_config='Down' and instance_status='Down in configuration' ")
            if int(p_num) <> int(m_num):
                raise Exception("gpstate in CT state failed")
            tinctest.logger.info("Done Running gpstate in %s phase " %(phase))
        elif phase == 'resync_incr':
            if type == 'primary':
                query = "select count(*) from gpstate_segment_status where role = preferred_role and mirror_status ='Resynchronizing' and  status_in_config='Up' and instance_status='Up'  and resync_mode= 'Incremental'"
                resync_incr_num = self.query_select_count(query)
            else:
                query = "select count(*) from gpstate_segment_status where mirror_status ='Resynchronizing' and  status_in_config='Up' and instance_status='Up' and resync_mode= 'Incremental'"
                resync_incr_num = self.query_select_count(query)
            query_num_rows = "select count(*) from gp_segment_configuration where content <> -1"
            num_rows = self.query_select_count(query_num_rows)
            if int(resync_incr_num) <> int(num_rows):
                tinctest.logger.info("resync_incr_num query run %s" % query)
                tinctest.logger.info("num_rows query run %s" % query_num_rows)
                raise Exception("gpstate in Resync Incremental  state failed. resync_incr_num %s <> num_rows %s" % (resync_incr_num, num_rows))
            tinctest.logger.info("Done Running gpstate in %s phase " %(phase))
        elif phase == 'resync_full':
            num_rows = self.query_select_count("select count(*) from gp_segment_configuration where content <> -1")
            if type == 'primary':
                resync_full_num = self.query_select_count("select count(*) from gpstate_segment_status where role = preferred_role and mirror_status ='Resynchronizing'  and  status_in_config='Up' and instance_status='Up'  and resync_mode= 'Full'")
            else:
                resync_full_num = self.query_select_count("select count(*) from gpstate_segment_status where mirror_status ='Resynchronizing'  and  status_in_config='Up' and instance_status='Up'  and resync_mode= 'Full'")
            if int(resync_full_num) <> int(num_rows):
                raise Exception("gptate in Resync Full state failed")
            tinctest.logger.info("Done Running gpstate in %s phase " %(phase))
        return True
    def trigger_transition(self):
        """Run the mirrors.sql workload that drives the mirror transition."""
        PSQL.run_sql_file(local_path('mirrors.sql'))
    def run_gpstate(self, type, phase):
        """
        Perform gpstate for each different transition state
        @type: failover type
        @phase: transition stage, can be sync1, ck_sync1, ct, resync, sync2
        """
        tinctest.logger.info("running gpstate")
        # Materialize gpstate's sample external table, run gpstate -s -a,
        # then validate the external table contents for this phase.
        querystring = self.get_ext_table_query_from_gpstate()
        file1 = local_path('create_table_gpstate.sql')
        f1 = open(file1,'w')
        f1.write(querystring)
        f1.write('\n')
        f1.close()
        PSQL.run_sql_file(local_path('create_table_gpstate.sql'))
        gpstate_outfile = local_path('gpstate_out')
        cmd = 'gpstate -s -a > %s 2>&1' % (gpstate_outfile)
        ok = run_shell_command(cmd)
        self.check_gpstate(type, phase)
        return ok
    def check_mirror_seg(self, master=False):
        """Validate primary/mirror integrity via the dbstate helper."""
        tinctest.logger.info("running check mirror")
        self.dbstate.check_mirrorintegrity()
    def do_gpcheckcat(self, dbname=None, alldb=False, online=False, outputFile='checkcat.out', outdir=None):
        """Run gpcheckcat catalog validation, writing results to outputFile."""
        tinctest.logger.info("running gpcheckcat")
        self.dbstate.check_catalog(outputFile=outputFile)
    def query_select_count(self,sqlcmd):
        """Run a count(*) query and return the scalar from psql's output."""
        (num) = PSQL.run_sql_command(sqlcmd)
        num = num.split('\n')[3].strip()
        return num
    def method_run_failover(self,type):
        """
        Inject fault to failover nodes
        @type: primary [induces fault in mirror] mirror [creates panic in primary]
        Return: (True, [result of fault injection]) if OK, or (False, [result of fault injection]) otherwise
        """
        # NOTE(review): the (ok, out) result of the injection is not checked;
        # this method returns True unconditionally.
        if type == 'primary':
            tinctest.logger.info("\n primary failover")
            (ok,out) = self.util.inject_fault(f='filerep_consumer', m='async' , y='fault', r ='mirror', H='ALL')
            tinctest.logger.info("\n Done Injecting Fault")
        elif type == 'mirror':
            tinctest.logger.info("\n Mirror failover")
            (ok,out) = self.util.inject_fault(f='postmaster', m='async' , y='panic', r ='primary', H='ALL')
            tinctest.logger.info("\n Done Injecting Fault")
        return True
    def wait_till_change_tracking_transition(self):
        """Block until the cluster has entered change-tracking mode."""
        self.util.wait_till_change_tracking_transition()
    def wait_till_insync_transition(self):
        """Block until all segments are back in sync."""
        self.gpr.wait_till_insync_transition()
    def run_gprecoverseg(self,recover_mode):
        """Run gprecoverseg; 'full' recovery when requested, else incremental."""
        if recover_mode == 'full':
            self.gpr.full()
        else:
            self.gpr.incremental()
    def run_gpconfig(self, parameter, master_value, segment_value):
        """Set a GUC via gpconfig and restart the cluster to apply it."""
        if (parameter is not None):
            self.gpconfig.setParameter(parameter, master_value, segment_value)
            self.gpstop.run_gpstop_cmd(restart='r')
    def inject_fault(self, fault = None, mode = None, operation = None, prim_mirr = None, host = 'All', table = None, database = None, seg_id = None, sleeptime = None, occurence = None):
        """Generic fault-injection entry point; the first four args are mandatory."""
        if (fault == None or mode == None or operation == None or prim_mirr == None):
            raise Exception('Incorrect parameters provided for inject fault')
        (ok,out) = self.util.inject_fault(f=fault, m=mode , y=operation, r=prim_mirr, H='ALL', table=table, database=database, sleeptime=sleeptime, o=occurence, seg_id=seg_id)
class Gpfdist:
    """Manage a gpfdist file-distribution daemon on a (possibly remote) host."""

    def __init__(self, port, hostname):
        """Record the port and host the daemon is managed on."""
        self.port = port
        self.hostname = hostname

    def startGpfdist(self, options):
        """Launch gpfdist over ssh and give it a moment to come up."""
        tinctest.logger.info("start hosting the data")
        remote_cmd = 'source %s/greenplum_path.sh && gpfdist -p %s %s & ' % (os.environ['GPHOME'], self.port, options)
        p = subprocess.Popen(['ssh', self.hostname, remote_cmd])
        tinctest.logger.info("gpfdist cmd: gpfdist" + options)
        # OS X needs a longer warm-up than Linux before the port is usable.
        sleep(10 if sys.platform.find("darwin") >= 0 else 5)
        return True

    def killGpfdist(self):
        """Find every running gpfdist pid via ps and kill each one."""
        tinctest.logger.info("kill the gpfdist process")
        cmd = 'ps -ef|grep \"gpfdist\"|grep -v grep|awk \'{print $2}\''
        process = subprocess.Popen(cmd, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        out, err = process.communicate()
        for pid in out.strip().split('\n'):
            self.killProcbyPid(pid.strip())
        return True

    def killProcbyPid(self, pid=None):
        """kill -9 one pid, returning the shell result; log when no pid given."""
        if not pid:
            tinctest.logger.info("pid is None and not valid to kill")
            return None
        return run_shell_command('kill -9 %s > /dev/null'%pid)
| {
"content_hash": "ca7e2d5236cbeca48c6669ef937af6af",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 247,
"avg_line_length": 40.965973534971646,
"alnum_prop": 0.6079091873932906,
"repo_name": "xuegang/gpdb",
"id": "6a4ba9335eaa98f2874b8aeaf4b15fec40c39b37",
"size": "21671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep_end_to_end/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5614"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35361773"
},
{
"name": "C++",
"bytes": "8133472"
},
{
"name": "CMake",
"bytes": "47394"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "Cucumber",
"bytes": "927827"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "FORTRAN",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Groff",
"bytes": "703079"
},
{
"name": "HTML",
"bytes": "218703"
},
{
"name": "Java",
"bytes": "1011277"
},
{
"name": "Lex",
"bytes": "210708"
},
{
"name": "M4",
"bytes": "106028"
},
{
"name": "Makefile",
"bytes": "497542"
},
{
"name": "Objective-C",
"bytes": "24186"
},
{
"name": "PLSQL",
"bytes": "190951"
},
{
"name": "PLpgSQL",
"bytes": "53337057"
},
{
"name": "Perl",
"bytes": "4082990"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9782036"
},
{
"name": "Ruby",
"bytes": "3301"
},
{
"name": "SQLPL",
"bytes": "1892720"
},
{
"name": "Shell",
"bytes": "504084"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "485235"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from django.db.models.signals import post_delete, post_save
import functools
import hashlib
WEEK = 7 * 24 * 60 * 60 # 1 week
def cache_me(key=None, ikey=None, signals=(), models=(), timeout=WEEK):
    """Decorator factory that memoizes a function in the Django cache.

    key:     cache key. None -> the function's name; a callable -> called
             with the call's args/kwargs; otherwise a '%'-format string
             filled with the positional args.
    ikey:    invalidation key(s). None -> the function's name; a callable ->
             called with the signal's (sender, **kwargs) and may return a
             single key or an iterable of keys; otherwise a literal key.
    signals: extra Django signals that also trigger invalidation.
    models:  models whose post_save/post_delete trigger invalidation.
    timeout: cache timeout in seconds (defaults to one week).
    """
    def hashme(k):
        # Keys are md5-hashed so arbitrary/unicode text fits backend key limits.
        if isinstance(k, unicode):
            k = k.encode('utf-8')
        return hashlib.md5(k).hexdigest()
    def decorator(f):
        def invalidate(sender, **kwargs):
            # Resolve which raw key(s) this signal should drop from the cache.
            if ikey is None:
                ks = (f.__name__,)
            elif callable(ikey):
                k = ikey(sender, **kwargs)
                if isinstance(k, basestring):
                    ks = (k,)
                else:
                    ks = k
            else:
                ks = (ikey,)
            if ks:
                cache.delete_many(map(hashme, ks))
        # Only wire the signal handlers when invalidation is meaningful:
        # an explicit ikey, or fully-default keys (both key and ikey None).
        if ikey or (ikey is None and key is None):
            for s in signals:
                s.connect(invalidate, weak=False)
            for m in models:
                post_save.connect(invalidate, sender=m, weak=False)
                post_delete.connect(invalidate, sender=m, weak=False)
        def _key(*args, **kwargs):
            # Raw cache key for this call, hashed like the invalidation keys.
            if key is None:
                k = f.__name__
            elif callable(key):
                k = key(*args, **kwargs)
            else:
                k = key % args
            return hashme(k)
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            k = _key(*args, **kwargs)
            data = cache.get(k)
            # NOTE(review): a cached value of None is indistinguishable from a
            # cache miss, so functions returning None are recomputed each call.
            if data is None:
                data = f(*args, **kwargs)
                cache.set(k, data, timeout)
            return data
        # Expose the key computation so callers can inspect/invalidate manually.
        wrapper.cachekey = _key
        return wrapper
    return decorator
from collections import defaultdict
from django.conf import settings as dsettings
from django.contrib import comments
from django.core.urlresolvers import reverse
from microblog import models
from microblog import settings
from taggit.models import TaggedItem
def _i_post_list(sender, **kw):
    """Return the post_list cache keys (one per configured language) to drop."""
    return ['m:post_list:%s' % lang[0] for lang in dsettings.LANGUAGES]
@cache_me(models=(models.Post,),
          key='m:post_list:%s',
          ikey=_i_post_list)
def post_list(lang):
    """Return all posts available in *lang*, newest first (cached per language)."""
    qs = models.Post.objects.all()
    qs = qs.byLanguage(lang)
    qs = qs.order_by('-date')
    qs = qs.select_related('category', 'author')
    return list(qs)
@cache_me(models=(models.Post,))
def tag_map():
    """Map each post id to the set of Tag objects attached to it (cached)."""
    tagged = TaggedItem.objects\
        .filter(content_type__app_label='microblog', content_type__model='post')\
        .select_related('tag')
    mapping = defaultdict(set)
    for item in tagged:
        mapping[item.object_id].add(item.tag)
    return mapping
@cache_me(models=(models.Post,),
          key = 'm:tagged_posts:%s',
          ikey = 'm:tagged_posts:%s')
def tagged_posts(name):
    """Return the set of ids of posts tagged with *name* (case-insensitive)."""
    ids = TaggedItem.objects\
        .filter(content_type__app_label='microblog', content_type__model='post')\
        .filter(tag__name__iexact=name)\
        .values_list('object_id', flat=True)
    return set(ids)
def _i_post_data(sender, **kw):
    """Compute the post_data cache keys to invalidate for a changed object.

    Handles three senders: Post itself, a comment attached to a post, and
    (anything else) PostContent -- resolving each to the post id involved.
    """
    instance = kw['instance']
    pid = None
    if sender is models.Post:
        pid = instance.id
    elif sender is comments.get_model():
        # Comments are generic; only those attached to microblog posts count.
        if instance.content_type.app_label == 'microblog' and instance.content_type.model == 'post':
            pid = instance.object_pk
    else:
        pid = instance.post_id
    if not pid:
        return []
    return ['m:post_data:%s%s' % (pid, lang[0]) for lang in dsettings.LANGUAGES]
@cache_me(models=(models.Post, models.PostContent, comments.get_model()),
    key='m:post_data:%s%s',
    ikey=_i_post_data)
def post_data(pid, lang):
    """Return a dict bundling a post with its content in *lang*, absolute
    url, public comments and tags; cached per (post id, language)."""
    post = models.Post.objects\
        .select_related('author', 'category')\
        .get(id=pid)
    try:
        # fallback=True lets another language's content stand in for *lang*.
        content = post.content(lang=lang, fallback=True)
    except models.PostContent.DoesNotExist:
        content = None
    comment_list = comments.get_model().objects\
        .filter(content_type__app_label='microblog', content_type__model='post')\
        .filter(object_pk=pid, is_public=True)
    # build_absolute_url yields a (view name, args, kwargs) triple for reverse().
    burl = models.PostContent.build_absolute_url(post, content)
    return {
        'post': post,
        'content': content,
        'url': dsettings.DEFAULT_URL_PREFIX + reverse(burl[0], args=burl[1], kwargs=burl[2]),
        'comments': list(comment_list),
        'tags': list(post.tags.all()),
    }
def _i_get_reactions(sender, **kw):
    """Return the cache key to invalidate when a trackback/pingback changes.

    Trackbacks reference the PostContent through ``content_id``; pingbacks
    (and anything else) reference it through the generic ``object_id``.
    """
    instance = kw['instance']
    if sender is models.Trackback:
        cid = instance.content_id
    else:
        cid = instance.object_id
    # Bug fix: the template must match the decorator's key ('m:reactions:%s',
    # see the cache_me calls below). The old 'm:reaction:%s' (singular) never
    # matched a cached entry, so get_reactions() results were never dropped.
    return 'm:reactions:%s' % cid
# Build the caching decorator for get_reactions(). When the pingback server
# is enabled, approved pingbacks are part of the cached result, so the cache
# must also be invalidated on Pingback save/delete -- not only on Trackback.
# Bug fix: the branches were previously inverted, registering Pingback
# invalidation only when the pingback server was *disabled* (where pingbacks
# are never queried), leaving stale caches when it was enabled.
if settings.MICROBLOG_PINGBACK_SERVER:
    from pingback.models import Pingback
    deco = cache_me(models=(models.Trackback, Pingback),
        key='m:reactions:%s',
        ikey=_i_get_reactions)
else:
    deco = cache_me(models=(models.Trackback,),
        key='m:reactions:%s',
        ikey=_i_get_reactions)
@deco
def get_reactions(cid):
    """Return the trackbacks (plus approved pingbacks, when the pingback
    server is enabled) for the given PostContent id, newest first (cached)."""
    trackbacks = models.Trackback.objects.filter(content=cid)
    if settings.MICROBLOG_PINGBACK_SERVER:
        from pingback.models import Pingback
        # Unfortunately the pingbacks_for_object method wants an object, not an id
        content = models.PostContent.objects.get(id=cid)
        pingbacks = Pingback.objects.pingbacks_for_object(content).filter(approved=True)
    else:
        pingbacks = []
    reactions = sorted(list(trackbacks) + list(pingbacks), key=lambda r: r.date, reverse=True)
    # normalize the reactions, making sure each one has an excerpt
    for ix, r in enumerate(reactions):
        if not hasattr(r, 'excerpt'):
            r.excerpt = r.content
    return reactions
| {
"content_hash": "8baca7f317cb85fa8fd938d06df52a3a",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 94,
"avg_line_length": 32.691428571428574,
"alnum_prop": 0.583114840062926,
"repo_name": "matrixise/microblog",
"id": "0074ec75e6b9dc5d25eb210e29f9ea24c69018c7",
"size": "5746",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "microblog/dataaccess.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "10905"
},
{
"name": "Python",
"bytes": "161878"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
try:
import wx
from wx import glcanvas
except ImportError:
raise ImportError, "Required dependency wx.glcanvas not present"
try:
from OpenGL.GL import *
except ImportError:
raise ImportError, "Required dependency OpenGL not present"
class GLFrame(wx.Frame):
    """A simple class for using OpenGL with wxPython.

    Hosts a double-buffered GLCanvas and defers OpenGL state setup until the
    first paint event, when a GL context is guaranteed to exist.
    """
    def __init__(self, parent, id, title, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
                 name='frame'):
        #
        # Forcing a specific style on the window.
        # Should this include styles passed?
        style = wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE
        super(GLFrame, self).__init__(parent, id, title, pos, size, style, name)
        # GL state is initialized lazily on the first paint (see processPaintEvent).
        self.GLinitialized = False
        attribList = (glcanvas.WX_GL_RGBA, # RGBA
                      glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
                      glcanvas.WX_GL_DEPTH_SIZE, 24) # 24 bit
        #
        # Create the canvas
        self.canvas = glcanvas.GLCanvas(self, attribList=attribList)
        #
        # Set the event handlers.
        self.canvas.Bind(wx.EVT_ERASE_BACKGROUND, self.processEraseBackgroundEvent)
        self.canvas.Bind(wx.EVT_SIZE, self.processSizeEvent)
        self.canvas.Bind(wx.EVT_PAINT, self.processPaintEvent)
    #
    # Canvas Proxy Methods
    def GetGLExtents(self):
        """Get the extents of the OpenGL canvas."""
        return self.canvas.GetClientSize()
    def SwapBuffers(self):
        """Swap the OpenGL buffers."""
        self.canvas.SwapBuffers()
    #
    # wxPython Window Handlers
    def processEraseBackgroundEvent(self, event):
        """Process the erase background event."""
        pass # Do nothing, to avoid flashing on MSWin
    def processSizeEvent(self, event):
        """Process the resize event.

        Re-applies the GL viewport/projection whenever the canvas resizes.
        """
        if self.canvas.GetContext():
            # Make sure the frame is shown before calling SetCurrent.
            self.Show()
            self.canvas.SetCurrent()
            size = self.GetGLExtents()
            self.OnReshape(size.width, size.height)
            self.canvas.Refresh(False)
        event.Skip()
    def processPaintEvent(self, event):
        """Process the drawing event."""
        self.canvas.SetCurrent()
        # This is a 'perfect' time to initialize OpenGL ... only if we need to
        if not self.GLinitialized:
            self.OnInitGL()
            self.GLinitialized = True
        self.OnDraw()
        event.Skip()
    #
    # GLFrame OpenGL Event Handlers
    def OnInitGL(self):
        """Initialize OpenGL for use in the window."""
        # White background.
        glClearColor(1, 1, 1, 1)
    def OnReshape(self, width, height):
        """Reshape the OpenGL viewport based on the dimensions of the window."""
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Orthographic projection spanning [-0.5, 0.5] in x and y.
        glOrtho(-0.5, 0.5, -0.5, 0.5, -1, 1)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
    def OnDraw(self, *args, **kwargs):
        "Draw the window."
        glClear(GL_COLOR_BUFFER_BIT)
        # Drawing an example triangle in the middle of the screen
        glBegin(GL_TRIANGLES)
        glColor(0, 0, 0)
        glVertex(-.25, -.25)
        glVertex(.25, -.25)
        glVertex(0, .25)
        glEnd()
        self.SwapBuffers()
#app = wx.PySimpleApp()
#frame = GLFrame(None, -1, 'GL Window')
#frame.Show()
#app.MainLoop()
#app.Destroy()
def jumeg_tsv_gui():
    """Create the wx application, show the JuMEG TSV GL window and run the event loop."""
    application = wx.App()
    main_frame = GLFrame(None, -1, 'JuMEG TSV')
    main_frame.Show()
    application.MainLoop()


if __name__ == '__main__':
    jumeg_tsv_gui()
| {
"content_hash": "aa705ed35b758f50c2f15af04ace15aa",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 83,
"avg_line_length": 26.34285714285714,
"alnum_prop": 0.6005965292841648,
"repo_name": "fboers/jumegX",
"id": "080a6573a12eefefd5d33ccfae7d2c8b432cd747",
"size": "3688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsvgl/old/jumeg_tsv_gui_V0001.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "GLSL",
"bytes": "261"
},
{
"name": "Perl",
"bytes": "90959"
},
{
"name": "Python",
"bytes": "1434828"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Packaging metadata for the single-module "physcon" distribution.
setup(
    name='physcon',
    version='0.1',
    description='Physical constants and units',
    url='http://github.com/georglind/physcon',
    author='Kim G L Pedersen',
    author_email='georglind@gmail.com',
    py_modules=['physcon'],
)
| {
"content_hash": "c3a88a09a6a85f186a3b661f410f6ce2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 28.9,
"alnum_prop": 0.6366782006920415,
"repo_name": "georglind/physcon",
"id": "582372e1d7fa441bdfc81b8a96c5f894cec80c43",
"size": "312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5109"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import ugettext_lazy as _
from message_coding.apps.project import models
class ProjectForm(forms.ModelForm):
    """
    Form for creating new projects.

    Exposes name, slug (relabelled "Short code"), description and members;
    field validation comes from the Project model.
    """
    class Meta:
        model = models.Project
        # Choose which fields to show on the form
        fields = ['name', 'slug', 'description', 'members']
        # Customize label text and add help messages
        labels = {
            'slug': _('Short code')
        }
        help_texts = {
            'slug': _('A unique name containing only letters and hyphens (e.g. my-cool-project)')
        }
| {
"content_hash": "025b0ba9cdb72ca65a67d22cb0b0cb18",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 97,
"avg_line_length": 25.16,
"alnum_prop": 0.6025437201907791,
"repo_name": "michaelbrooks/uw-message-coding",
"id": "c30a0bd3a8dc7a24d33da06afa538584933afc3e",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "message_coding/apps/project/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20931"
},
{
"name": "HTML",
"bytes": "47615"
},
{
"name": "JavaScript",
"bytes": "30966"
},
{
"name": "Python",
"bytes": "123216"
},
{
"name": "Shell",
"bytes": "13307"
}
],
"symlink_target": ""
} |
from nose.tools import *
from exercises import ex13
def test_max_in_list():
    """Confirm max_in_list returns the largest number in the sequence."""
    result = ex13.max_in_list([1, 2, 3, 4, 2, 6, 2])
    assert_equal(result, 6)
| {
"content_hash": "922ad7296e7724d8c9f5d821a43b0175",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 22,
"alnum_prop": 0.6239669421487604,
"repo_name": "gravyboat/python-exercises",
"id": "14cdc39fabf7404442db9748cb5c228de4c0cd78",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ex13_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19625"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from reversion.admin import VersionAdmin
from test_app.models import TestModel, TestModelRelated
class TestModelAdmin(VersionAdmin):
    # Use the horizontal filter widget for the many-to-many "related" field.
    filter_horizontal = ("related",)


# Register TestModel with version tracking; the related model gets a plain admin.
admin.site.register(TestModel, TestModelAdmin)
admin.site.register(TestModelRelated, admin.ModelAdmin)
| {
"content_hash": "0010be1d5a879e99e86ccef7ca14a70c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 22.357142857142858,
"alnum_prop": 0.8115015974440895,
"repo_name": "etianen/django-reversion",
"id": "c6641231097165bb8e5b1b6db3b2520ec262320b",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6265"
},
{
"name": "Python",
"bytes": "130019"
}
],
"symlink_target": ""
} |
from ophyd.controls import EpicsSignal
# Helper function for generating Epics PVs
def getEpicsPV(basename, suffix):
    """Return the full EPICS PV name formed by appending *suffix* to *basename*.

    :param basename: PV name prefix, e.g. the device base record name
    :param suffix: record field / signal suffix to append
    :return: concatenated PV name string
    """
    # Plain concatenation is the idiomatic form for joining two strings;
    # the original ''.join([basename, suffix]) was needless indirection.
    return basename + suffix
# Helper function for generating Epics Signals
def getEpicsSignal(basename, suffix, name = None, rw = False):
    """Build an EpicsSignal for the PV named ``basename + suffix``.

    ``name`` and ``rw`` are forwarded to the EpicsSignal constructor.
    """
    return EpicsSignal(getEpicsPV(basename, suffix), rw=rw, name=name)
| {
"content_hash": "78d82e0f8c84d7a3b530ed10cb99d3e5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.7202072538860104,
"repo_name": "NSLS-II-XPD/ipython_ophyd",
"id": "22e39be165ee5d2b72f0d21cdd4508f3d1d99109",
"size": "409",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "profile_collection/pescan/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "864"
},
{
"name": "JavaScript",
"bytes": "10578"
},
{
"name": "Python",
"bytes": "776274"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from aiosocks.connector import ProxyConnector, ProxyClientRequest, Socks4Auth, Socks5Auth
from aiohttp import ClientSession, ClientError, BasicAuth
from .common import *
import logging
HTTPResponse = namedtuple('HTTPResponse',
['version', 'status', 'reason', 'url', 'cookies', 'content_type', 'charset', 'headers',
'content', 'raw', 'proxy_used'])
async def _response_to_tuple(response, proxy_used):
try:
content = await response.text()
except UnicodeDecodeError:
pass
raw = await response.read()
return HTTPResponse(response.version, response.status, response.reason, response.url, response.cookies,
response.content_type, response.charset, response.headers, content, raw, proxy_used)
class HttpConnection:
    def __init__(self, cfg):
        """Store the configuration, resolve proxy settings and open a session.

        :param cfg: dict-like configuration; may contain 'proxy' and
            'timeouts' sections (see _get_auth / create_sessions).
        """
        self.logger = logging.getLogger(__name__)
        self.cfg = cfg
        self.session = None
        # Resolve proxy settings once up front; proxy_only forces every
        # request through the proxy with no direct fallback.
        self.proxy_url, self.proxy_auth, self.proxy_only = self._get_auth()
        self.create_sessions()
def create_sessions(self):
if self.session:
return
con = ProxyConnector(remote_resolve=True)
session_cfg = dict(raise_for_status=True, connector=con, request_class=ProxyClientRequest)
session_cfg.update(self.cfg.get('timeouts', {}))
self.session = ClientSession(**session_cfg) # error if return code >= 400
def _get_proxy_url(self):
if 'url' not in self.cfg['proxy']:
raise InvalidConfigError("proxy have no url configured")
url = self.cfg['proxy']['url']
if not isinstance(url, str):
raise InvalidConfigError("proxy url should be string")
return url
def _get_proxy_auth(self, url):
schema = url.split('://')[0]
if schema not in {'socks4', 'socks5', 'http'}:
raise InvalidConfigError(f"in valid schema for url {url}")
if 'auth' not in self.cfg['proxy']:
return None
if schema == "socks4":
auth = dict(login='')
else:
auth = dict(login='', password='')
auth.update(self.cfg['proxy']['auth'])
if schema == 'socks5':
return Socks5Auth(**auth)
elif schema == 'socks4':
return Socks4Auth(**auth)
elif schema == 'http':
return BasicAuth(**auth)
else:
raise InvalidConfigError(f"in valid schema for url {url}")
def _get_auth(self):
if 'proxy' not in self.cfg:
return None, None, False
url = self._get_proxy_url()
auth = self._get_proxy_auth(url)
proxy_only = self.cfg['proxy'].get('proxy_only', False)
return url, auth, proxy_only
async def request(self, url, proxy_first=False):
self.create_sessions()
if not self.proxy_url:
return await self._request(url)
if self.proxy_only:
return await self._request_proxy(url)
step1, step2 = (self._request_proxy, self._request) if proxy_first else (self._request, self._request_proxy)
try:
return await step1(url)
except ClientError as e:
self.logger.debug("failed the first request to %s, proxy_first=%r, try another way", url, proxy_first)
pass
return await step2(url)
    async def _request_proxy(self, url):
        """GET *url* through the configured proxy; returns an HTTPResponse tuple."""
        async with self.session.get(url, proxy=self.proxy_url, proxy_auth=self.proxy_auth) as response:
            return await _response_to_tuple(response, True)
    async def _request(self, url):
        """GET *url* directly (no proxy); returns an HTTPResponse tuple."""
        async with self.session.get(url) as response:
            return await _response_to_tuple(response, False)
    def close(self):
        # Close the underlying session if one was created.
        # NOTE(review): in newer aiohttp ClientSession.close() is a coroutine
        # and the result is not awaited here -- confirm against the pinned
        # aiohttp version.
        if self.session:
            self.session.close()
    def __enter__(self):
        # Context-manager support: ``with HttpConnection(cfg) as conn: ...``
        return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close() | {
"content_hash": "3be274fc5412530cae6c009fbfcacc58",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 116,
"avg_line_length": 34.86725663716814,
"alnum_prop": 0.6007614213197969,
"repo_name": "dahakawang/feedme",
"id": "de8b2a098ef81d35454a481e6b9547b0a85e14f2",
"size": "3940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robot/http_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "144"
},
{
"name": "JavaScript",
"bytes": "1925"
},
{
"name": "Python",
"bytes": "35850"
}
],
"symlink_target": ""
} |
import argparse
import os, audioop, numpy, glob, scipy, subprocess, wave, cPickle, threading, shutil, ntpath
import matplotlib.pyplot as plt
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioSegmentation as aS
import audioVisualization as aV
import audioBasicIO
import utilities as uT
import scipy.io.wavfile as wavfile
import matplotlib.patches
def dirMp3toWavWrapper(directory, samplerate, channels):
    """Convert every MP3 in *directory* to WAV at the given rate/channels."""
    if not os.path.isdir(directory):
        raise Exception("Input path not found!")
    # final True: name the generated WAVs after the MP3 tags, not the
    # source file names
    audioBasicIO.convertDirMP3ToWav(directory, samplerate, channels, True)
def dirWAVChangeFs(directory, samplerate, channels):
    """Resample every WAV file in *directory* to the given rate/channels."""
    if os.path.isdir(directory):
        audioBasicIO.convertFsDirWavToWav(directory, samplerate, channels)
    else:
        raise Exception("Input path not found!")
def featureExtractionFileWrapper(wavFileName, outFile, mtWin, mtStep, stWin, stStep):
    """Extract mid-term features from one WAV file and store them in *outFile*."""
    if not os.path.isfile(wavFileName):
        raise Exception("Input audio file not found!")
    # trailing flags: store as numpy, store as csv, store short-term features
    aF.mtFeatureExtractionToFile(wavFileName, mtWin, mtStep, stWin, stStep,
                                 outFile, True, True, True)
def beatExtractionWrapper(wavFileName, plot):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName);
F = aF.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
BPM, ratio = aF.beatExtraction(F, 0.050, plot)
print "Beat: {0:d} bpm ".format(int(BPM))
print "Ratio: {0:.2f} ".format(ratio)
def featureExtractionDirWrapper(directory, mtWin, mtStep, stWin, stStep):
    """Extract mid-term features for every WAV file under *directory*."""
    if not os.path.isdir(directory):
        raise Exception("Input path not found!")
    aF.mtFeatureExtractionToFileDir(directory, mtWin, mtStep, stWin, stStep,
                                    True, True, True)
def featureVisualizationDirWrapper(directory):
    """Visualize (via PCA) the per-file features of all WAVs in *directory*."""
    if not os.path.isdir(directory):
        raise Exception("Input folder not found!")
    aV.visualizeFeaturesFolder(directory, "pca", "")
def fileSpectrogramWrapper(wavFileName):
    """Compute and plot the short-term spectrogram of a WAV file."""
    if not os.path.isfile(wavFileName):
        raise Exception("Input audio file not found!")
    [samplingRate, signal] = audioBasicIO.readAudioFile(wavFileName)
    signal = audioBasicIO.stereo2mono(signal)
    # last argument True enables plotting; the returned arrays are unused here
    aF.stSpectogram(signal, samplingRate,
                    round(samplingRate * 0.040), round(samplingRate * 0.040), True)
def fileChromagramWrapper(wavFileName):
    """Compute and plot the chromagram of a WAV file."""
    if not os.path.isfile(wavFileName):
        raise Exception("Input audio file not found!")
    [samplingRate, signal] = audioBasicIO.readAudioFile(wavFileName)
    signal = audioBasicIO.stereo2mono(signal)
    # last argument True enables plotting; the returned arrays are unused here
    aF.stChromagram(signal, samplingRate,
                    round(samplingRate * 0.040), round(samplingRate * 0.040), True)
def trainClassifierWrapper(method, beatFeatures, directories, modelName):
    """Train an SVM/kNN classifier from WAVs grouped into class directories."""
    if len(directories) < 2:
        raise Exception("At least 2 directories are needed")
    aT.featureAndTrain(directories, 1, 1, aT.shortTermWindow, aT.shortTermStep,
                       method.lower(), modelName, computeBEAT=beatFeatures)
def trainRegressionWrapper(method, beatFeatures, dirName, modelName):
    """Train a regression model from WAVs plus ground-truth CSVs in *dirName*."""
    aT.featureAndTrainRegression(dirName, 1, 1, aT.shortTermWindow,
                                 aT.shortTermStep, method.lower(), modelName,
                                 computeBEAT=beatFeatures)
def classifyFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
[Result, P, classNames] = aT.fileClassification(inputFile, modelName, modelType)
print "{0:s}\t{1:s}".format("Class","Probability")
for i,c in enumerate(classNames):
print "{0:s}\t{1:.2f}".format(c,P[i])
print "Winner class: " + classNames[int(Result)]
def regressionFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
R, regressionNames = aT.fileRegression(inputFile, modelName, modelType)
for i in range(len(R)):
print "{0:s}\t{1:.3f}".format(regressionNames[i], R[i])
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
    """Classify every WAV file in a folder and print a per-class histogram.

    When outputMode is True the winning class of each individual file is
    also printed. *inputFolder* may be a directory or a path prefix.
    """
    if not os.path.isfile(modelName):
        raise Exception("Input modelName not found!")
    files = "*.wav"
    # Accept either a real directory or a raw path prefix.
    if os.path.isdir(inputFolder):
        strFilePattern = os.path.join(inputFolder, files)
    else:
        strFilePattern = inputFolder + files
    wavFilesList = []
    wavFilesList.extend(glob.glob(strFilePattern))
    wavFilesList = sorted(wavFilesList)
    if len(wavFilesList)==0:
        print "No WAV files found!"
        return
    Results = []
    for wavFile in wavFilesList:
        [Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
        Result = int(Result)
        Results.append(Result)
        if outputMode:
            print "{0:s}\t{1:s}".format(wavFile,classNames[Result])
    Results = numpy.array(Results)
    # print distribution of classes:
    [Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames)+1))
    for i,h in enumerate(Histogram):
        print "{0:20s}\t\t{1:d}".format(classNames[i], h)
def regressionFolderWrapper(inputFolder, modelType, modelName):
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList)==0:
print "No WAV files found!"
return
Results = []
for wavFile in wavFilesList:
R, regressionNames = aT.fileRegression(wavFile, modelName, modelType)
Results.append(R)
Results = numpy.array(Results)
for i, r in enumerate(regressionNames):
[Histogram, bins] = numpy.histogram(Results[:, i])
centers = (bins[0:-1] + bins[1::]) / 2.0
plt.subplot(len(regressionNames), 1, i);
plt.plot(centers, Histogram)
plt.title(r)
plt.show()
def trainHMMsegmenter_fromfile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
if not os.path.isfile(wavFile):
print "Error: wavfile does not exist!"; return
if not os.path.isfile(gtFile):
print "Error: groundtruth does not exist!"; return
aS.trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep)
def trainHMMsegmenter_fromdir(directory, hmmModelName, mtWin, mtStep):
    """Train an HMM segmenter from all WAV + annotation pairs in *directory*."""
    if not os.path.isdir(directory):
        raise Exception("Input folder not found!")
    aS.trainHMM_fromDir(directory, hmmModelName, mtWin, mtStep)
def segmentclassifyFileWrapper(inputWavFile, modelName, modelType):
    """Segment and classify a WAV file with a fixed (svm/knn) model.

    Ground truth, when present, is read from the sibling ``.segments`` file.
    """
    for path, message in ((modelName, "Input modelName not found!"),
                          (inputWavFile, "Input audio file not found!")):
        if not os.path.isfile(path):
            raise Exception(message)
    gtFile = inputWavFile.replace(".wav", ".segments")
    aS.mtFileClassification(inputWavFile, modelName, modelType, True, gtFile)
def segmentclassifyFileWrapperHMM(wavFile, hmmModelName):
    """Segment and classify a WAV file using a pre-trained HMM model."""
    aS.hmmSegmentation(wavFile, hmmModelName, PLOT=True,
                       gtFileName=wavFile.replace(".wav", ".segments"))
def segmentationEvaluation(dirName, modelName, methodName):
    """Evaluate segmentation-classification over a folder of WAV + CSV pairs."""
    aS.evaluateSegmentationClassificationDir(dirName, modelName, methodName)
def silenceRemovalWrapper(inputFile, smoothingWindow, weight):
    """Split the non-silent parts of a WAV file into separate WAV files."""
    if not os.path.isfile(inputFile):
        raise Exception("Input audio file not found!")
    [Fs, x] = audioBasicIO.readAudioFile(inputFile)
    # detect non-silent (start, end) segments, in seconds
    segments = aS.silenceRemoval(x, Fs, 0.05, 0.05, smoothingWindow, weight, True)
    for i, s in enumerate(segments):
        outName = "{0:s}_{1:.3f}-{2:.3f}.wav".format(inputFile[0:-4], s[0], s[1])
        wavfile.write(outName, Fs, x[int(Fs * s[0]):int(Fs * s[1])])
def speakerDiarizationWrapper(inputFile, numSpeakers, useLDA):
    """Run speaker diarization; *useLDA* toggles the FLsD variant."""
    kwargs = {'PLOT': True}
    if not useLDA:
        # LDAdim = 0 disables the FLsD dimensionality reduction
        kwargs['LDAdim'] = 0
    aS.speakerDiarization(inputFile, numSpeakers, **kwargs)
def thumbnailWrapper(inputFile, thumbnailWrapperSize):
    """Extract the two most-representative segments ("thumbnails") of a song.

    Writes the two thumbnails next to the input file as *_thumb1.wav and
    *_thumb2.wav, then plots the self-similarity matrix with the selected
    diagonals highlighted.
    """
    # 1-second short-term window/step for the self-similarity analysis
    stWindow = 1.0
    stStep = 1.0
    if not os.path.isfile(inputFile):
        raise Exception("Input audio file not found!")
    [Fs, x] = audioBasicIO.readAudioFile(inputFile)                            # read file
    if Fs == -1:    # could not read file
        return
    [A1, A2, B1, B2, Smatrix] = aS.musicThumbnailing(x, Fs, stWindow, stStep, thumbnailWrapperSize)    # find thumbnailWrapper endpoints
    # write thumbnailWrappers to WAV files:
    thumbnailWrapperFileName1 = inputFile.replace(".wav","_thumb1.wav")
    thumbnailWrapperFileName2 = inputFile.replace(".wav","_thumb2.wav")
    wavfile.write(thumbnailWrapperFileName1, Fs, x[int(Fs*A1):int(Fs*A2)])
    wavfile.write(thumbnailWrapperFileName2, Fs, x[int(Fs*B1):int(Fs*B2)])
    print "1st thumbnailWrapper (stored in file {0:s}): {1:4.1f}sec -- {2:4.1f}sec".format(thumbnailWrapperFileName1, A1, A2)
    print "2nd thumbnailWrapper (stored in file {0:s}): {1:4.1f}sec -- {2:4.1f}sec".format(thumbnailWrapperFileName2, B1, B2)
    # Plot self-similarity matrix:
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect="auto")
    plt.imshow(Smatrix)
    # Plot best-similarity diagonal:
    Xcenter = (A1/stStep + A2/stStep) / 2.0
    Ycenter = (B1/stStep + B2/stStep) / 2.0
    # ellipse marks the centre of the best off-diagonal similarity stripe
    e1 = matplotlib.patches.Ellipse((Ycenter, Xcenter), thumbnailWrapperSize * 1.4, 3,
                                    angle=45, linewidth=3, fill=False)
    ax.add_patch(e1)
    # dashed guide lines from each endpoint to the matrix edges
    plt.plot([B1, Smatrix.shape[0]], [A1, A1], color="k", linestyle="--", linewidth=2)
    plt.plot([B2, Smatrix.shape[0]], [A2, A2], color="k", linestyle="--", linewidth=2)
    plt.plot([B1, B1], [A1, Smatrix.shape[0]], color="k", linestyle="--", linewidth=2)
    plt.plot([B2, B2], [A2, Smatrix.shape[0]], color="k", linestyle="--", linewidth=2)
    plt.xlim([0, Smatrix.shape[0]])
    plt.ylim([Smatrix.shape[1], 0])
    ax.yaxis.set_label_position("right")
    ax.yaxis.tick_right()
    plt.xlabel("frame no")
    plt.ylabel("frame no")
    plt.title("Self-similarity matrix")
    plt.show()
def parse_arguments():
    """Build and evaluate the command-line interface.

    One subcommand is registered per analysis task; ``args.task`` carries
    the chosen subcommand name for dispatch in ``__main__``.
    """
    parser = argparse.ArgumentParser(description="A demonstration script for pyAudioAnalysis library")
    tasks = parser.add_subparsers(
        title="subcommands", description="available tasks", dest="task", metavar="")
    # --- format conversion tasks ---
    dirMp3Wav = tasks.add_parser("dirMp3toWav", help="Convert .mp3 files in a directory to .wav format")
    dirMp3Wav.add_argument("-i", "--input", required=True, help="Input folder")
    dirMp3Wav.add_argument("-r", "--rate", type=int, choices=[8000, 16000, 32000, 44100],
                           required=True, help="Samplerate of generated WAV files")
    dirMp3Wav.add_argument("-c", "--channels", type=int, choices=[1, 2],
                           required=True, help="Audio channels of generated WAV files")
    dirWavRes = tasks.add_parser("dirWavResample", help="Change samplerate of .wav files in a directory")
    dirWavRes.add_argument("-i", "--input", required=True, help="Input folder")
    dirWavRes.add_argument("-r", "--rate", type=int, choices=[8000, 16000, 32000, 44100],
                           required=True, help="Samplerate of generated WAV files")
    dirWavRes.add_argument("-c", "--channels", type=int, choices=[1, 2],
                           required=True, help="Audio channels of generated WAV files")
    # --- feature extraction / visualization tasks ---
    featExt = tasks.add_parser("featureExtractionFile", help="Extract audio features from file")
    featExt.add_argument("-i", "--input", required=True, help="Input audio file")
    featExt.add_argument("-o", "--output", required=True, help="Output file")
    featExt.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
    featExt.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
    featExt.add_argument("-sw", "--stwin", type=float, default = 0.050, help="Short-term window size")
    featExt.add_argument("-ss", "--ststep", type=float, default = 0.050, help="Short-term window step")
    beat = tasks.add_parser("beatExtraction", help="Compute beat features of an audio file")
    beat.add_argument("-i", "--input", required=True, help="Input audio file")
    beat.add_argument("--plot", action="store_true", help="Generate plot")
    featExtDir = tasks.add_parser("featureExtractionDir", help="Extract audio features from files in a folder")
    featExtDir.add_argument("-i", "--input", required=True, help="Input directory")
    featExtDir.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
    featExtDir.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
    featExtDir.add_argument("-sw", "--stwin", type=float, default = 0.050, help="Short-term window size")
    featExtDir.add_argument("-ss", "--ststep", type=float, default = 0.050, help="Short-term window step")
    featVis = tasks.add_parser("featureVisualization")
    featVis.add_argument("-i", "--input", required=True, help="Input directory")
    spectro = tasks.add_parser("fileSpectrogram")
    spectro.add_argument("-i", "--input", required=True, help="Input audio file")
    chroma = tasks.add_parser("fileChromagram")
    chroma.add_argument("-i", "--input", required=True, help="Input audio file")
    # --- training tasks ---
    trainClass = tasks.add_parser("trainClassifier", help="Train an SVM or KNN classifier")
    trainClass.add_argument("-i", "--input", nargs="+", required=True, help="Input directories")
    trainClass.add_argument("--method", choices=["svm", "knn"], required=True, help="Classifier type")
    trainClass.add_argument("--beat", action="store_true", help="Compute beat features")
    trainClass.add_argument("-o", "--output", required=True, help="Generated classifier filename")
    trainReg = tasks.add_parser("trainRegression")
    trainReg.add_argument("-i", "--input", required=True, help="Input directory")
    trainReg.add_argument("--method", choices=["svm", "knn"], required=True, help="Classifier type")
    trainReg.add_argument("--beat", action="store_true", help="Compute beat features")
    trainReg.add_argument("-o", "--output", required=True, help="Generated classifier filename")
    # --- classification / regression / segmentation tasks ---
    classFile = tasks.add_parser("classifyFile", help="Classify a file using an existing classifier")
    classFile.add_argument("-i", "--input", required=True, help="Input audio file")
    classFile.add_argument("--model", choices=["svm", "knn"], required=True, help="Classifier type (svm or knn)")
    classFile.add_argument("--classifier", required=True, help="Classifier to use (path)")
    trainHMM = tasks.add_parser("trainHMMsegmenter_fromfile", help="Train an HMM from file + annotation data")
    trainHMM.add_argument("-i", "--input", required=True, help="Input audio file")
    trainHMM.add_argument("--ground", required=True, help="Ground truth path (segments CSV file)")
    trainHMM.add_argument("-o", "--output", required=True, help="HMM model name (path)")
    trainHMM.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
    trainHMM.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
    trainHMMDir = tasks.add_parser("trainHMMsegmenter_fromdir", help="Train an HMM from file + annotation data stored in a directory (batch)")
    trainHMMDir.add_argument("-i", "--input", required=True, help="Input audio folder")
    trainHMMDir.add_argument("-o", "--output", required=True, help="HMM model name (path)")
    trainHMMDir.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
    trainHMMDir.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
    segmentClassifyFile = tasks.add_parser("segmentClassifyFile", help="Segmentation - classification of a WAV file given a trained SVM or kNN")
    segmentClassifyFile.add_argument("-i", "--input", required=True, help="Input audio file")
    segmentClassifyFile.add_argument("--model", choices=["svm", "knn"], required=True, help="Model type")
    segmentClassifyFile.add_argument("--modelName", required=True, help="Model path")
    segmentClassifyFileHMM = tasks.add_parser("segmentClassifyFileHMM", help="Segmentation - classification of a WAV file given a trained HMM")
    segmentClassifyFileHMM.add_argument("-i", "--input", required=True, help="Input audio file")
    segmentClassifyFileHMM.add_argument("--hmm", required=True, help="HMM Model to use (path)")
    segmentationEvaluation = tasks.add_parser("segmentationEvaluation", help="Segmentation - classification evaluation for a list of WAV files and CSV ground-truth stored in a folder")
    segmentationEvaluation.add_argument("-i", "--input", required=True, help="Input audio folder")
    segmentationEvaluation.add_argument("--model", choices=["svm", "knn", "hmm"], required=True, help="Model type")
    segmentationEvaluation.add_argument("--modelName", required=True, help="Model path")
    regFile = tasks.add_parser("regressionFile")
    regFile.add_argument("-i", "--input", required=True, help="Input audio file")
    regFile.add_argument("--model", choices=["svm", "knn"], required=True, help="Regression type")
    regFile.add_argument("--regression", required=True, help="Regression model to use")
    classFolder = tasks.add_parser("classifyFolder")
    classFolder.add_argument("-i", "--input", required=True, help="Input folder")
    classFolder.add_argument("--model", choices=["svm", "knn"], required=True, help="Classifier type")
    classFolder.add_argument("--classifier", required=True, help="Classifier to use (filename)")
    classFolder.add_argument("--details", action="store_true", help="Plot details (otherwise only counts per class are shown)")
    regFolder = tasks.add_parser("regressionFolder")
    regFolder.add_argument("-i", "--input", required=True, help="Input folder")
    regFolder.add_argument("--model", choices=["svm", "knn"], required=True, help="Classifier type")
    regFolder.add_argument("--regression", required=True, help="Regression model to use")
    # --- miscellaneous analysis tasks ---
    silrem = tasks.add_parser("silenceRemoval", help="Remove silence segments from a recording")
    silrem.add_argument("-i", "--input", required=True, help="input audio file")
    silrem.add_argument("-s", "--smoothing", type=float, default=1.0, help="smoothing window size in seconds.")
    silrem.add_argument("-w", "--weight", type=float, default=0.5, help="weight factor in (0, 1)")
    spkrDir = tasks.add_parser("speakerDiarization")
    spkrDir.add_argument("-i", "--input", required=True, help="Input audio file")
    spkrDir.add_argument("-n", "--num", type=int, required=True, help="Number of speakers")
    spkrDir.add_argument("--flsd", action="store_true", help="Enable FLsD method")
    speakerDiarizationScriptEval = tasks.add_parser("speakerDiarizationScriptEval", help="Train an SVM or KNN classifier")
    speakerDiarizationScriptEval.add_argument("-i", "--input", required=True, help="Input directory")
    speakerDiarizationScriptEval.add_argument("--LDAs", type = int, nargs="+", required=True, help="List FLsD params")
    thumb = tasks.add_parser("thumbnail", help="Generate a thumbnailWrapper for an audio file")
    thumb.add_argument("-i", "--input", required=True, help="input audio file")
    thumb.add_argument("-s", "--size", default=10.0, type=float, help="thumbnailWrapper size in seconds.")
    return parser.parse_args()
# NOTE(review): exact duplicate of trainHMMsegmenter_fromdir defined earlier
# in this file; this redefinition silently shadows the first. Consider
# removing one copy.
def trainHMMsegmenter_fromdir(directory, hmmModelName, mtWin, mtStep):
    """Train an HMM segmenter from all WAV + annotation pairs in *directory*."""
    if not os.path.isdir(directory):
        raise Exception("Input folder not found!")
    aS.trainHMM_fromDir(directory, hmmModelName, mtWin, mtStep)
# NOTE(review): exact duplicate of segmentclassifyFileWrapperHMM defined
# earlier in this file; this redefinition silently shadows the first.
# Consider removing one copy.
def segmentclassifyFileWrapperHMM(wavFile, hmmModelName):
    """Segment and classify a WAV file using a pre-trained HMM model."""
    gtFile = wavFile.replace(".wav", ".segments");
    aS.hmmSegmentation(wavFile, hmmModelName, PLOT = True, gtFileName = gtFile)
if __name__ == "__main__":
args = parse_arguments()
if args.task == "dirMp3toWav": # Convert mp3 to wav (batch - folder)
dirMp3toWavWrapper(args.input, args.rate, args.channels)
elif args.task == "dirWavResample": # Convert Fs for a list of wavs stored in a folder
dirWAVChangeFs(args.input, args.rate, args.channels)
elif args.task == "featureExtractionFile": # Feature extraction for WAV file
featureExtractionFileWrapper(args.input, args.output, args.mtwin, args.mtstep, args.stwin, args.ststep)
elif args.task == "featureExtractionDir": # Feature extraction for all WAV files stored in a folder
featureExtractionDirWrapper(args.input, args.mtwin, args.mtstep, args.stwin, args.ststep)
elif args.task == "fileSpectrogram": # Extract spectrogram from a WAV file
fileSpectrogramWrapper(args.input)
elif args.task == "fileChromagram": # Extract chromagram from a WAV file
fileChromagramWrapper(args.input)
elif args.task == "featureVisualization": # Visualize the content of a list of WAV files stored in a folder
featureVisualizationDirWrapper(args.input)
elif args.task == "beatExtraction": # Extract bpm from file
beatExtractionWrapper(args.input, args.plot)
elif args.task == "trainClassifier": # Train classifier from data (organized in folders)
trainClassifierWrapper(args.method, args.beat, args.input, args.output)
elif args.task == "trainRegression": # Train a regression model from data (organized in a single folder, while ground-truth is provided in a CSV)
trainRegressionWrapper(args.method, args.beat, args.input, args.output)
elif args.task == "classifyFile": # Apply audio classifier on audio file
classifyFileWrapper(args.input, args.model, args.classifier)
elif args.task == "trainHMMsegmenter_fromfile": # Train an hmm segmenter-classifier from WAV file + annotation
trainHMMsegmenter_fromfile(args.input, args.ground, args.output, args.mtwin, args.mtstep)
elif args.task == "trainHMMsegmenter_fromdir": # Train an hmm segmenter-classifier from a list of WAVs and annotations stored in a folder
trainHMMsegmenter_fromdir(args.input, args.output, args.mtwin, args.mtstep)
elif args.task == "segmentClassifyFile": # Apply a classifier (svm or knn) for segmentation-classificaiton to a WAV file
segmentclassifyFileWrapper(args.input, args.modelName, args.model)
elif args.task == "segmentClassifyFileHMM": # Apply an hmm for segmentation-classificaiton to a WAV file
segmentclassifyFileWrapperHMM(args.input, args.hmm)
elif args.task == "segmentationEvaluation": # Evaluate segmentation-classification for a list of WAV files (and ground truth CSVs) stored in a folder
segmentationEvaluation(args.input, args.modelName, args.model)
elif args.task == "regressionFile": # Apply a regression model to an audio signal stored in a WAV file
regressionFileWrapper(args.input, args.model, args.regression)
elif args.task == "classifyFolder": # Classify every WAV file in a given path
classifyFolderWrapper(args.input, args.model, args.classifier, args.details)
elif args.task == "regressionFolder": # Apply a regression model on every WAV file in a given path
regressionFolderWrapper(args.input, args.model, args.regression)
elif args.task == "silenceRemoval": # Detect non-silent segments in a WAV file and output to seperate WAV files
silenceRemovalWrapper(args.input, args.smoothing, args.weight)
elif args.task == "speakerDiarization": # Perform speaker diarization on a WAV file
speakerDiarizationWrapper(args.input, args.num, args.flsd)
elif args.task == "speakerDiarizationScriptEval": # Evaluate speaker diarization given a folder that contains WAV files and .segment (Groundtruth files)
aS.speakerDiarizationEvaluateScript(args.input, args.LDAs)
elif args.task == "thumbnail": # Audio thumbnailing
thumbnailWrapper(args.input, args.size)
| {
"content_hash": "b10534c7e35f14aaa07bd37abf295fb7",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 181,
"avg_line_length": 53.523148148148145,
"alnum_prop": 0.7237695701063922,
"repo_name": "bossjones/pyAudioAnalysis",
"id": "ddbe0486b0de6bbfa10cded2bc7fc6acb3484e07",
"size": "23148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audioAnalysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2167"
},
{
"name": "HTML",
"bytes": "4082"
},
{
"name": "Matlab",
"bytes": "1758"
},
{
"name": "Python",
"bytes": "150222"
},
{
"name": "Shell",
"bytes": "344"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from oslo_log import log as logging
import proboscis.asserts as asserts
from proboscis.dependencies import SkipTest
from trove.common import utils
from trove.tests.config import CONFIG
LOG = logging.getLogger(__name__)
MESSAGE_QUEUE = defaultdict(list)
def create_usage_verifier():
    """Instantiate the usage verifier class named by ``CONFIG.usage_endpoint``."""
    return utils.import_object(CONFIG.usage_endpoint)
class UsageVerifier(object):
    """Verifies usage notifications collected in the module-level queue."""

    def clear_events(self):
        """Hook that is called to allow endpoints to clean up."""
        pass

    def check_message(self, resource_id, event_type, **attrs):
        """Wait for a message of *event_type* for *resource_id*, verify *attrs*."""
        messages = utils.poll_until(lambda: self.get_messages(resource_id),
                                    lambda x: len(x) > 0, time_out=30)
        # keep the LAST matching message (same scan order as before)
        found = None
        for message in messages:
            if message['event_type'] == event_type:
                found = message
        asserts.assert_is_not_none(found,
                                   "No message type %s for resource %s" %
                                   (event_type, resource_id))
        with asserts.Check() as check:
            for key, value in attrs.items():
                check.equal(found[key], value)

    def get_messages(self, resource_id, expected_messages=None):
        """Return queued messages, optionally asserting the expected count."""
        # read-only access; .get() avoids creating a defaultdict entry
        msgs = MESSAGE_QUEUE.get(resource_id, [])
        if expected_messages is not None:
            asserts.assert_equal(len(msgs), expected_messages)
        return msgs
class FakeVerifier(object):
    """This is the default handler in fake mode, it is basically a no-op."""

    def clear_events(self):
        """Nothing to clean up in fake mode."""
        pass

    def check_message(self, *args, **kwargs):
        """Notifications are unavailable in fake mode, so skip the check."""
        raise SkipTest("Notifications not available")

    def get_messages(self, *args, **kwargs):
        """Fake mode has no message queue; returns None."""
        pass
def notify(event_type, payload):
    """Test notify hook: record usage notifications in the global queue.

    Tags the payload with its event_type, then stores it keyed by
    instance_id (server messages, which carry 'server_type', are ignored).
    """
    payload['event_type'] = event_type
    is_usage_message = ('instance_id' in payload
                        and 'server_type' not in payload)
    if not is_usage_message:
        return
    LOG.debug('Received Usage Notification: %s', event_type)
    global MESSAGE_QUEUE
    instance = payload['instance_id']
    MESSAGE_QUEUE[instance].append(payload)
    LOG.debug('Message Queue for %(id)s now has %(msg_count)d messages',
              {'id': instance,
               'msg_count': len(MESSAGE_QUEUE[instance])})
| {
"content_hash": "f7fb28ba56370d8cd0356d0bc13dc9b0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 33.95652173913044,
"alnum_prop": 0.6205719163465643,
"repo_name": "zhangg/trove",
"id": "ae13ad831c7245d33c5dfe060f2ce9aa0d48c4bd",
"size": "2980",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/tests/util/usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4546016"
},
{
"name": "Shell",
"bytes": "145524"
}
],
"symlink_target": ""
} |
import sys, os, string, subprocess
#aliasing the filenames using the labels
def run_command(command):
print "Running command: " + command
err_capture_file = open("my.stderr", 'w') # writing stderr to a file
cmd_run = subprocess.Popen(args=command, shell=True, stderr=err_capture_file, stdout=sys.stdout)
err = cmd_run.wait() # get exit code from command execution
err_capture_file.close()
if err:
# report the error messages we captured, and exit non-zero
sys.stderr.write("Error, cmd: " + command + " died with ret: " + `err`)
for line in open(err_capture_file):
sys.stderr.write(line)
sys.exit(err)
return
# Symlink each input file to its display label; argv alternates
# (filename, label) pairs starting at argv[1].
label_list = []
for idx in range(1, len(sys.argv), 2):
    src_file = sys.argv[idx]
    label = sys.argv[idx + 1]
    link_cmd = "ln -sf " + src_file + " " + label
    label_list.append(label)
    run_command(link_cmd)
# run the abundance estimation script over the freshly-made symlinks
matrix_cmd = (os.path.dirname(sys.argv[0]) + "/trinityToolWrapper.py " +
              " util/abundance_estimates_to_matrix.pl --est_method RSEM "
              "--cross_sample_fpkm_norm none " + " ".join(label_list))
run_command(matrix_cmd)
sys.exit(0)
| {
"content_hash": "5a91db3c381c6a2f94800a62a5ebc46d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 178,
"avg_line_length": 30.789473684210527,
"alnum_prop": 0.6521367521367522,
"repo_name": "ssn1306/trinityrnaseq",
"id": "33df3876b7fb347fea155a3018da0b5b26eb6621",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galaxy-plugin/abundance_estimation_to_matrix_wrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "17771"
},
{
"name": "C++",
"bytes": "827939"
},
{
"name": "HTML",
"bytes": "6652"
},
{
"name": "Java",
"bytes": "750457"
},
{
"name": "Makefile",
"bytes": "22772"
},
{
"name": "Perl",
"bytes": "2062786"
},
{
"name": "PostScript",
"bytes": "1368935"
},
{
"name": "Python",
"bytes": "45138"
},
{
"name": "R",
"bytes": "132214"
},
{
"name": "Shell",
"bytes": "139563"
}
],
"symlink_target": ""
} |
"""An IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and prefixes.
"""
__version__ = '1.0.2'
import struct
from json import loads
from secure.gae import gaeinit
gaeinit()
from google.appengine.api import urlfetch
class PublicIP:
    """Looks up this host's public IP address via the jsonip.com service."""

    def __init__(self):
        # Stateless helper; nothing to initialize.
        return

    @staticmethod
    def get():
        """Fetch the caller's public address and wrap it in an IP object."""
        response = urlfetch.fetch('http://jsonip.com')
        body = loads(response.content)
        return IP(body['ip'])
class Error(Exception):
    """Root of this module's exception hierarchy."""
class IPTypeError(Error):
    """A v4-only operation was attempted on a v6 object, or vice versa."""
class IPAddressExclusionError(Error):
    """Internal invariant violated during address exclusion (a bug)."""
class IPv4IpValidationError(Error):
    """Raised when an IPv4 address is invalid."""

    def __init__(self, ip):
        Error.__init__(self)
        # Keep the offending value for the error message.
        self.ip = ip

    def __str__(self):
        return '%r is not a valid IPv4 address' % (self.ip,)
class IPv4NetmaskValidationError(Error):
    """Raised when a netmask is invalid."""

    def __init__(self, netmask):
        Error.__init__(self)
        # Keep the offending value for the error message.
        self.netmask = netmask

    def __str__(self):
        return '%r is not a valid IPv4 netmask' % (self.netmask,)
class IPv6IpValidationError(Error):
    """Raised when an IPv6 address is invalid."""

    def __init__(self, ip):
        Error.__init__(self)
        # Keep the offending value for the error message.
        self.ip = ip

    def __str__(self):
        return '%r is not a valid IPv6 address' % (self.ip,)
class IPv6NetmaskValidationError(Error):
    """Raised when an IPv6 netmask is invalid."""

    def __init__(self, netmask):
        Error.__init__(self)
        # Keep the offending value for the error message.
        self.netmask = netmask

    def __str__(self):
        return '%r is not a valid IPv6 netmask' % (self.netmask,)
class PrefixlenDiffInvalidError(Error):
    """Raised when Sub/Supernets is called with a bad prefixlen_diff."""

    def __init__(self, error_str):
        Error.__init__(self)
        # Human-readable description of why the diff was rejected.
        self.error_str = error_str
def IP(ipaddr):
    """Take an IP string/int and return an object of the correct type.

    Args:
        ipaddr: A string or integer, the IP address.  Either IPv4 or
            IPv6 addresses may be supplied; integers less than 2**32
            will be considered to be IPv4.

    Returns:
        An IPv4 or IPv6 object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
            address.
    """
    # Try each address family in turn; validation errors mean "not this one".
    candidates = (
        (IPv4, (IPv4IpValidationError, IPv4NetmaskValidationError)),
        (IPv6, (IPv6IpValidationError, IPv6NetmaskValidationError)),
    )
    for factory, validation_errors in candidates:
        try:
            return factory(ipaddr)
        except validation_errors:
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     ipaddr)
def _collapse_address_list_recursive(addresses):
    """Loops through the addresses, collapsing concurrent netblocks.

    This shouldn't be called directly; it is called via
    collapse_address_list([]), which pre-sorts the input.

    Args:
        addresses: A sorted list of IPv4 or IPv6 objects.

    Returns:
        A list of IPv4 or IPv6 objects depending on what we were passed.
    """
    collapsed = []
    made_progress = False
    for current in addresses:
        if not collapsed:
            collapsed.append(current)
            continue
        previous = collapsed[-1]
        if current in previous:
            # current is fully covered by the previous netblock: drop it.
            made_progress = True
        elif current == previous.supernet().subnet()[1]:
            # current is the second half of previous' supernet: merge.
            collapsed[-1] = previous.supernet()
            made_progress = True
        else:
            collapsed.append(current)
    # Merging two halves may enable further merges, so recurse until
    # a full pass makes no progress.
    if made_progress:
        return _collapse_address_list_recursive(collapsed)
    return collapsed
def collapse_address_list(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
        [IPv4('1.1.0.0/23')]

    Args:
        addresses: A list of IPv4 or IPv6 objects.

    Returns:
        A list of IPv4 or IPv6 objects depending on what we were passed.
    """
    # The recursive helper requires network-sorted input.
    ordered = sorted(addresses, key=BaseIP._get_networks_key)
    return _collapse_address_list_recursive(ordered)
class BaseIP(object):
    """A generic IP object.

    This IP class contains most of the methods which are used by
    the IPv4 and IPv6 classes.
    """

    def __getitem__(self, n):
        # Positive n indexes up from the network address; negative n
        # indexes back from the broadcast address.
        if n >= 0:
            if self.network + n > self.broadcast:
                raise IndexError
            return self._string_from_ip_int(self.network + n)
        else:
            if self.broadcast + n < self.network:
                raise IndexError
            return self._string_from_ip_int(self.broadcast + n)

    def __lt__(self, other):
        # Compare lexicographically on (version, ip, netmask).  The
        # previous implementation or'ed the three comparisons together,
        # which is not a consistent ordering: with mixed ip/netmask
        # directions both a < b and b < a could be true.
        try:
            if self.version != other.version:
                return self.version < other.version
            if self.ip != other.ip:
                return self.ip < other.ip
            return self.netmask < other.netmask
        except AttributeError:
            return NotImplemented

    def __gt__(self, other):
        # Mirror of __lt__; see the ordering note there.
        try:
            if self.version != other.version:
                return self.version > other.version
            if self.ip != other.ip:
                return self.ip > other.ip
            return self.netmask > other.netmask
        except AttributeError:
            return NotImplemented

    def __eq__(self, other):
        try:
            return (self.version == other.version
                    and self.ip == other.ip
                    and self.netmask == other.netmask)
        except AttributeError:
            return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq

    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt

    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def __index__(self):
        return self.ip

    def __int__(self):
        return self.ip

    def address_exclude(self, other):
        """Remove an address from a larger block.

        For example:
            addr1 = IP('10.1.1.0/24')
            addr2 = IP('10.1.1.0/26')
            addr1.address_exclude(addr2) =
                [IP('10.1.1.64/26'), IP('10.1.1.128/25')]

        Args:
            other: An IP object of the same type.

        Returns:
            A sorted list of IP objects addresses which is self minus
            other.

        Raises:
            IPTypeError: If self and other are of difffering address
                versions.
            IPAddressExclusionError: There was some unknown error in the
                address exclusion process.  This likely points to a bug
                elsewhere in this code.
            ValueError: If other is not completely contained by self.
        """
        if not self.version == other.version:
            raise IPTypeError("%s and %s aren't of the same version" % (
                str(self), str(other)))
        if other not in self:
            raise ValueError('%s not contained in %s' % (str(other),
                                                         str(self)))
        ret_addrs = []
        # Make sure we're comparing the network of other.
        other = IP(other.network_ext + '/' + str(other.prefixlen))
        # Repeatedly halve self; keep the half that does not contain
        # other, descend into the half that does, until a half IS other.
        s1, s2 = self.subnet()
        while s1 != other and s2 != other:
            if other in s1:
                ret_addrs.append(s2)
                s1, s2 = s1.subnet()
            elif other in s2:
                ret_addrs.append(s1)
                s1, s2 = s2.subnet()
            else:
                # If we got here, there's a bug somewhere.
                raise IPAddressExclusionError('Error performing exclusion: '
                                              's1: %s s2: %s other: %s' %
                                              (str(s1), str(s2), str(other)))
        if s1 == other:
            ret_addrs.append(s2)
        elif s2 == other:
            ret_addrs.append(s1)
        else:
            # If we got here, there's a bug somewhere.
            raise IPAddressExclusionError('Error performing exclusion: '
                                          's1: %s s2: %s other: %s' %
                                          (str(s1), str(s2), str(other)))
        return sorted(ret_addrs, key=BaseIP._get_networks_key)

    def compare_networks(self, other):
        """Compare two IP objects.

        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA.ip < HostB.ip'

        Args:
            other: An IP object.

        Returns:
            -1, 0 or 1, ordering first by version, then network,
            then netmask (see the original docstring examples).
        """
        if self.version < other.version:
            return -1
        if self.version > other.version:
            return 1
        # self.version == other.version below here:
        if self.network < other.network:
            return -1
        if self.network > other.network:
            return 1
        # self.network == other.network below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        # self.network == other.network and self.netmask == other.netmask
        return 0

    def _get_networks_key(self):
        """Network-only key function.

        Returns an object that identifies this address' network and
        netmask.  This function is a suitable "key" argument for sorted()
        and list.sort().
        """
        return (self.version, self.network, self.netmask)

    prefixlen = property(
        fget=lambda self: self._prefixlen,
        fset=lambda self, prefixlen: self._set_prefix(prefixlen))

    def __str__(self):
        return '%s/%s' % (self._string_from_ip_int(self.ip),
                          str(self.prefixlen))

    def __hash__(self):
        # Consistent with __eq__ (which compares ip and netmask).
        return hash(self.ip ^ self.netmask)

    def __contains__(self, other):
        # Containment is judged on other's base address and broadcast,
        # i.e. other's whole range must fall within self's range.
        return self.network <= other.ip and self.broadcast >= other.broadcast

    @property
    def ip_ext(self):
        """Dotted decimal or colon string version of the IP address."""
        return self._string_from_ip_int(self.ip)

    @property
    def ip_ext_full(self):
        """Canonical string version of the IP address."""
        return self.ip_ext

    @property
    def broadcast(self):
        """Integer representation of the broadcast address."""
        return self.ip | self.hostmask

    @property
    def broadcast_ext(self):
        """Dotted decimal or colon string version of the broadcast."""
        return self._string_from_ip_int(self.broadcast)

    @property
    def hostmask(self):
        """Integer representation of the hostmask."""
        return self.netmask ^ self._ALL_ONES

    @property
    def hostmask_ext(self):
        """Dotted decimal or colon string version of the hostmask."""
        return self._string_from_ip_int(self.hostmask)

    @property
    def network(self):
        """Integer representation of the network."""
        return self.ip & self.netmask

    @property
    def network_ext(self):
        """Dotted decimal or colon string version of the network."""
        return self._string_from_ip_int(self.network)

    @property
    def netmask_ext(self):
        """Dotted decimal or colon string version of the netmask."""
        return self._string_from_ip_int(self.netmask)

    @property
    def numhosts(self):
        """Number of hosts in the current subnet."""
        return self.broadcast - self.network + 1

    @property
    def version(self):
        raise NotImplementedError('BaseIP has no version')

    def _ip_int_from_prefix(self, prefixlen=None):
        """Turn the prefix length netmask into a int for comparison.

        Args:
            prefixlen: An integer, the prefix length.

        Returns:
            An integer.
        """
        if not prefixlen and prefixlen != 0:
            prefixlen = self.prefixlen
        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)

    def _prefix_from_ip_int(self, ip_int, mask=32):
        """Return prefix length from the decimal netmask.

        Strips trailing zero bits off the netmask; what remains is the
        prefix length.

        Args:
            ip_int: An integer, the IP address.
            mask: The netmask.  Defaults to 32.

        Returns:
            An integer, the prefix length.
        """
        while mask:
            if ip_int & 1 == 1:
                break
            ip_int >>= 1
            mask -= 1
        return mask

    def _ip_string_from_prefix(self, prefixlen=None):
        """Turn a prefix length into a dotted decimal string.

        Args:
            prefixlen: An integer, the netmask prefix length.

        Returns:
            A string, the dotted decimal netmask string.
        """
        # Treat an explicit 0 as a real prefix length, consistent with
        # _ip_int_from_prefix (previously 0 silently fell back to
        # self.prefixlen).
        if not prefixlen and prefixlen != 0:
            prefixlen = self.prefixlen
        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
class IPv4(BaseIP):
    """This class represents and manipulates 32-bit IPv4 addresses.
    Attributes: [examples for IPv4('1.2.3.4/27')]
        .ip: 16909060
        .ip_ext: '1.2.3.4'
        .ip_ext_full: '1.2.3.4'
        .network: 16909056L
        .network_ext: '1.2.3.0'
        .hostmask: 31L (0x1F)
        .hostmask_ext: '0.0.0.31'
        .broadcast: 16909087L (0x102031F)
        .broadcast_ext: '1.2.3.31'
        .netmask: 4294967040L (0xFFFFFFE0)
        .netmask_ext: '255.255.255.224'
        .prefixlen: 27
    """
    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = 0xffffffff
    def __init__(self, ipaddr):
        """Instantiate a new IPv4 object.
        Args:
            ipaddr: A string or integer representing the IP [& network].
                '192.168.1.1/32'
                '192.168.1.1/255.255.255.255'
                '192.168.1.1/0.0.0.255'
                '192.168.1.1'
                are all functionally the same in IPv4. That is to say,
                failing to provide a subnetmask will create an object with
                a mask of /32. A netmask of '255.255.255.255' is assumed
                to be /32 and '0.0.0.0' is assumed to be /0, even though
                other netmasks can be expressed both as host- and
                net-masks. (255.0.0.0 == 0.255.255.255)
                Additionally, an integer can be passed, so
                IPv4('192.168.1.1') == IPv4(3232235777).
                or, more generally
                IPv4(IPv4('192.168.1.1').ip) == IPv4('192.168.1.1')
        Raises:
            IPv4IpValidationError: If ipaddr isn't a valid IPv4 address.
            IPv4NetmaskValidationError: If the netmask isn't valid for
                an IPv4 address.
        """
        BaseIP.__init__(self)
        self._version = 4
        # Efficient constructor from integer.
        if isinstance(ipaddr, int):
            self.ip = ipaddr
            self._prefixlen = 32
            self.netmask = self._ALL_ONES
            if ipaddr < 0 or ipaddr > self._ALL_ONES:
                raise IPv4IpValidationError(ipaddr)
            return
        # Constructing from a packed address (4 big-endian bytes).
        if isinstance(ipaddr, (bytes, bytearray)) and len(ipaddr) == 4:
            self.ip = struct.unpack('!I', ipaddr)[0]
            self._prefixlen = 32
            self.netmask = self._ALL_ONES
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = str(ipaddr).split('/')
        if len(addr) > 2:
            raise IPv4IpValidationError(ipaddr)
        if not self._is_valid_ip(addr[0]):
            raise IPv4IpValidationError(addr[0])
        self.ip = self._ip_int_from_string(addr[0])
        if len(addr) == 2:
            mask = addr[1].split('.')
            if len(mask) == 4:
                # We have dotted decimal netmask.
                if not self._is_valid_netmask(addr[1]):
                    raise IPv4NetmaskValidationError(addr[1])
                if self._is_hostmask(addr[1]):
                    # Hostmasks (e.g. 0.0.0.255) are inverted into netmasks.
                    self.netmask = (
                        self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
                else:
                    self.netmask = self._ip_int_from_string(addr[1])
                self._prefixlen = self._prefix_from_ip_int(self.netmask)
            else:
                # We have a netmask in prefix length form.
                if not self._is_valid_netmask(addr[1]):
                    raise IPv4NetmaskValidationError(addr[1])
                self._prefixlen = int(addr[1])
                self.netmask = self._ip_int_from_prefix(self._prefixlen)
        else:
            # No netmask given: a single host, /32.
            self._prefixlen = 32
            self.netmask = self._ip_int_from_prefix(self._prefixlen)
    def _set_prefix(self, prefixlen):
        """Change the prefix length.
        Args:
            prefixlen: An integer, the new prefix length.
        Raises:
            IPv4NetmaskValidationError: If prefixlen is out of bounds.
        """
        if not 0 <= prefixlen <= 32:
            raise IPv4NetmaskValidationError(prefixlen)
        self._prefixlen = prefixlen
        self.netmask = self._ip_int_from_prefix(self._prefixlen)
    def subnet(self, prefixlen_diff=1):
        """The subnets which join to make the current subnet.
        In the case that self contains only one IP
        (self._prefixlen == 32), return a list with just ourself.
        Args:
            prefixlen_diff: An integer, the amount the prefix length
                should be increased by. Given a /24 network and a
                prefixlen_diff of 3, for example, 8 subnets of size /27
                will be returned. The default value of 1 splits the
                current network into two halves.
        Returns:
            A list of IPv4 objects.
        Raises:
            PrefixlenDiffInvalidError: The prefixlen_diff is too small
                or too large.
        """
        if self._prefixlen == 32:
            return [self]
        if prefixlen_diff < 0:
            raise PrefixlenDiffInvalidError('prefix length diff must be > 0')
        new_prefixlen = self.prefixlen + prefixlen_diff
        if not self._is_valid_netmask(str(new_prefixlen)):
            raise PrefixlenDiffInvalidError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, str(self)))
        # Walk from the first (network-aligned) subnet, stepping by one
        # past each subnet's broadcast, until we reach our own broadcast.
        first = IPv4(
            self._string_from_ip_int(self.network) + '/' +
            str(self._prefixlen + prefixlen_diff))
        subnets = [first]
        current = first
        while True:
            broadcast = current.broadcast
            if broadcast == self.broadcast:
                break
            current = IPv4(self._string_from_ip_int(broadcast + 1) + '/' +
                           str(new_prefixlen))
            subnets.append(current)
        return subnets
    def supernet(self, prefixlen_diff=1):
        """The supernet containing the current network.
        Args:
            prefixlen_diff: An integer, the amount the prefix length of
                the network should be decreased by. For example, given a
                /24 network and a prefixlen_diff of 3, a supernet with a
                /21 netmask is returned.
        Returns:
            An IPv4 object.
        Raises:
            PrefixlenDiffInvalidError: If
                self.prefixlen - prefixlen_diff < 0. I.e., you have a
                negative prefix length.
        """
        if self.prefixlen == 0:
            return self
        if self.prefixlen - prefixlen_diff < 0:
            raise PrefixlenDiffInvalidError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return IPv4(self.ip_ext + '/' + str(self.prefixlen - prefixlen_diff))
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.
        Returns:
            A boolean, True if the address is reserved per RFC 1918.
        """
        return (self in IPv4('10.0.0.0/8') or
                self in IPv4('172.16.0.0/12') or
                self in IPv4('192.168.0.0/16'))
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.
        Returns:
            A boolean, True if the address is multicast.
        See RFC 3171 for details.
        """
        return self in IPv4('224.0.0.0/4')
    @property
    def is_loopback(self):
        """Test if the address is a loopback adddress.
        Returns:
            A boolean, True if the address is a loopback per RFC 3330.
        """
        return self in IPv4('127.0.0.0/8')
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.
        Returns:
            A boolean, True if the address is link-local per RFC 3927.
        """
        return self in IPv4('169.254.0.0/16')
    @property
    def version(self):
        # Always 4; set in __init__.
        return self._version
    @property
    def packed(self):
        """The binary representation of this address."""
        return struct.pack('!I', self.ip)
    def _is_hostmask(self, ip_str):
        """Test if the IP string is a hostmask (rather than a netmask).
        Args:
            ip_str: A string, the potential hostmask.
        Returns:
            A boolean, True if the IP string is a hostmask.
        """
        # Heuristic: in a hostmask (0.0.0.255) the first octet is smaller
        # than the last; in a netmask (255.255.255.0) it is not.  Only
        # meaningful for inputs that already passed _is_valid_netmask.
        parts = [int(x) for x in ip_str.split('.')]
        if parts[0] < parts[-1]:
            return True
        return False
    def _ip_int_from_string(self, ip_str):
        """Turn the given IP string into an integer for comparison.
        Note: performs no validation; callers validate with _is_valid_ip
        first.
        Args:
            ip_str: A string, the IP address.
        Returns:
            The IP address as an integer.
        """
        packed_ip = 0
        for oc in ip_str.split('.'):
            packed_ip = (packed_ip << 8) | int(oc)
        return packed_ip
    def _string_from_ip_int(self, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.
        Args:
            ip_int: An integer, the IP address.
        Returns:
            The IP address as a string in dotted decimal notation.
        """
        octets = []
        for _ in range(4):
            octets.insert(0, str(ip_int & 0xFF))
            ip_int >>= 8
        return '.'.join(octets)
    def _is_valid_ip(self, ip_str):
        """Validate the dotted decimal notation IP/netmask string.
        Args:
            ip_str: A string, the IP address.
        Returns:
            A boolean, True if the string is a valid dotted decimal IP
            string.
        """
        octets = ip_str.split('.')
        if len(octets) == 1:
            # We have an integer rather than a dotted decimal IP.
            try:
                return int(ip_str) >= 0 and int(ip_str) <= self._ALL_ONES
            except ValueError:
                return False
        if len(octets) != 4:
            return False
        for octet in octets:
            try:
                if not 0 <= int(octet) <= 255:
                    return False
            except ValueError:
                return False
        return True
    def _is_valid_netmask(self, netmask):
        """Verify that the netmask is valid.
        Args:
            netmask: A string, either a prefix or dotted decimal
                netmask.
        Returns:
            A boolean, True if the prefix represents a valid IPv4
            netmask.
        """
        # Dotted-decimal masks are validated like addresses; anything
        # else must be an integer prefix length in [0, 32].
        if len(netmask.split('.')) == 4:
            return self._is_valid_ip(netmask)
        try:
            netmask = int(netmask)
        except ValueError:
            return False
        return 0 <= netmask <= 32
class IPv6(BaseIP):
"""This class respresents and manipulates 128-bit IPv6 addresses.
Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
.ip: 42540616829182469433547762482097946625L
.ip_ext: '2001:658:22a:cafe:200::1'
.ip_ext_full: '2001:0658:022a:cafe:0200:0000:0000:0001'
.network: 42540616829182469433403647294022090752L
.network_ext: '2001:658:22a:cafe::'
.hostmask: 18446744073709551615L
.hostmask_ext: '::ffff:ffff:ffff:ffff'
.broadcast: 42540616829182469451850391367731642367L
.broadcast_ext: '2001:658:22a:cafe:ffff:ffff:ffff:ffff'
.netmask: 340282366920938463444927863358058659840L
.netmask_ext: 64
.prefixlen: 64
"""
_ALL_ONES = (2**128) - 1
    def __init__(self, ipaddr):
        """Instantiate a new IPv6 object.
        Args:
            ipaddr: A string or integer representing the IP or the IP
                and prefix/netmask.
                '2001:4860::/128'
                '2001:4860:0000:0000:0000:0000:0000:0000/128'
                '2001:4860::'
                are all functionally the same in IPv6. That is to say,
                failing to provide a subnetmask will create an object with
                a mask of /128.
                Additionally, an integer can be passed, so
                IPv6('2001:4860::') ==
                IPv6(42541956101370907050197289607612071936L).
                or, more generally
                IPv6(IPv6('2001:4860::').ip) == IPv6('2001:4860::')
        Raises:
            IPv6IpValidationError: If ipaddr isn't a valid IPv6 address.
            IPv6NetmaskValidationError: If the netmask isn't valid for
                an IPv6 address.
        """
        BaseIP.__init__(self)
        self._version = 6
        # Efficient constructor from integer.
        if isinstance(ipaddr, int):
            self.ip = ipaddr
            self._prefixlen = 128
            self.netmask = self._ALL_ONES
            if ipaddr < 0 or ipaddr > self._ALL_ONES:
                raise IPv6IpValidationError(ipaddr)
            return
        # Constructing from a packed address (16 big-endian bytes,
        # unpacked as two 64-bit halves).
        if isinstance(ipaddr, (bytes, bytearray)) and len(ipaddr) == 16:
            tmp = struct.unpack('!QQ', ipaddr)
            self.ip = (tmp[0] << 64) | tmp[1]
            self._prefixlen = 128
            self.netmask = self._ALL_ONES
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr_str = str(ipaddr)
        if not addr_str:
            raise IPv6IpValidationError('')
        addr = addr_str.split('/')
        # Parse/validate the prefix length first, then the address part.
        if len(addr) > 1:
            if self._is_valid_netmask(addr[1]):
                self._prefixlen = int(addr[1])
            else:
                raise IPv6NetmaskValidationError(addr[1])
        else:
            self._prefixlen = 128
        self.netmask = self._ip_int_from_prefix(self._prefixlen)
        if not self._is_valid_ip(addr[0]):
            raise IPv6IpValidationError(addr[0])
        self.ip = self._ip_int_from_string(addr[0])
@property
def ip_ext_full(self):
"""Returns the expanded version of the IPv6 string."""
return self._explode_shorthand_ip_string(self.ip_ext)
def _set_prefix(self, prefixlen):
"""Change the prefix length.
Args:
prefixlen: An integer, the new prefix length.
Raises:
IPv6NetmaskValidationError: If prefixlen is out of bounds.
"""
if not 0 <= prefixlen <= 128:
raise IPv6NetmaskValidationError(prefixlen)
self._prefixlen = prefixlen
self.netmask = self._ip_int_from_prefix(self.prefixlen)
def subnet(self, prefixlen_diff=1):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 128), return a list with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by.
Returns:
A list of IPv6 objects.
Raises:
PrefixlenDiffInvalidError: The prefixlen_diff is too small
or too large.
"""
# Preserve original functionality (return [self] if
# self.prefixlen == 128).
if self.prefixlen == 128:
return [self]
if prefixlen_diff < 0:
raise PrefixlenDiffInvalidError('Prefix length diff must be > 0')
new_prefixlen = self.prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise PrefixlenDiffInvalidError(
'Prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, str(self)))
first = IPv6(
self._string_from_ip_int(self.network) + '/' +
str(self._prefixlen + prefixlen_diff))
subnets = [first]
current = first
while True:
broadcast = current.broadcast
if current.broadcast == self.broadcast:
break
current = IPv6(self._string_from_ip_int(broadcast + 1) + '/' +
str(new_prefixlen))
subnets.append(current)
return subnets
def supernet(self, prefixlen_diff=1):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of the
network should be decreased by. For example, given a /96
network and a prefixlen_diff of 3, a supernet with a /93
netmask is returned.
Returns:
An IPv6 object.
Raises:
PrefixlenDiffInvalidError: If
self._prefixlen - prefixlen_diff < 0. I.e., you have a
negative prefix length.
"""
if self.prefixlen == 0:
return self
if self.prefixlen - prefixlen_diff < 0:
raise PrefixlenDiffInvalidError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return IPv6(self.ip_ext + '/' + str(self.prefixlen - prefixlen_diff))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in IPv6('ff00::/8')
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self == IPv6('::')
@property
def is_loopback(self):
"""Test if the address is a loopback adddress.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self == IPv6('::1')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in IPv6('fe80::/10')
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in IPv6('fec0::/10')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return self in IPv6('fc00::/7')
@property
def version(self):
return self._version
@property
def packed(self):
"""The binary representation of this address."""
return struct.pack('!QQ', self.ip >> 64, self.ip & (2**64 - 1))
def _is_shorthand_ip(self, ip_str=None):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
return False
    def _explode_shorthand_ip_string(self, ip_str):
        """Expand a shortened IPv6 address.
        Args:
            ip_str: A string, the IPv6 address.
        Returns:
            A string, the expanded IPv6 address.
        """
        if self._is_shorthand_ip(ip_str):
            new_ip = []
            hextet = ip_str.split('::')
            # Count the groups present on both sides of the '::'; the
            # difference from 8 is how many zero groups '::' stands for.
            sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
            new_ip = hextet[0].split(':')
            for _ in range(8 - sep):
                new_ip.append('0000')
            new_ip += hextet[1].split(':')
            # Now need to make sure every hextet is 4 lower case characters.
            # If a hextet is < 4 characters, we've got missing leading 0's.
            ret_ip = []
            for hextet in new_ip:
                ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
            return ':'.join(ret_ip)
        # We've already got a longhand ip_str.
        return ip_str
    def _is_valid_ip(self, ip_str=None):
        """Ensure we have a valid IPv6 address.
        Probably not as exhaustive as it should be.
        Args:
            ip_str: A string, the IPv6 address.
        Returns:
            A boolean, True if this is a valid IPv6 address.
        """
        if not ip_str:
            ip_str = self.ip_ext
        # We need to have at least one ':'.
        if ':' not in ip_str:
            return False
        # We can only have one '::' shortener.
        if ip_str.count('::') > 1:
            return False
        # '::' should be encompassed by start, digits or end.
        if ':::' in ip_str:
            return False
        # A single colon can neither start nor end an address.
        if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
                (ip_str.endswith(':') and not ip_str.endswith('::'))):
            return False
        # If we have no concatenation, we need to have 8 fields with 7 ':'.
        if '::' not in ip_str and ip_str.count(':') != 7:
            # We might have an IPv4 mapped address.
            if ip_str.count('.') != 3:
                return False
        # NOTE(review): when '::' is present the total group count is not
        # capped here, so some over-long inputs may slip through — matches
        # the "not exhaustive" caveat above.
        ip_str = self._explode_shorthand_ip_string(ip_str)
        # Now that we have that all squared away, let's check that each of the
        # hextets are between 0x0 and 0xFFFF.
        for hextet in ip_str.split(':'):
            if hextet.count('.') == 3:
                # If we have an IPv4 mapped address, the IPv4 portion has to be
                # at the end of the IPv6 portion.
                if not ip_str.split(':')[-1] == hextet:
                    return False
                try:
                    IPv4(hextet)
                except IPv4IpValidationError:
                    return False
            elif int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                return False
        return True
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= 128
def _ip_int_from_string(self, ip_str=None):
"""Turn an IPv6 address into an integer.
Args:
ip_str: A string, the IPv6 address.
Returns:
A long, the IPv6 address.
"""
if not ip_str:
ip_str = self.ip_ext
ip_int = 0
fields = self._explode_shorthand_ip_string(ip_str).split(':')
# Do we have an IPv4 mapped (::ffff:a.b.c.d) or compact (::a.b.c.d)
# address?
if fields[-1].count('.') == 3:
ipv4_string = fields.pop()
ipv4_int = IPv4(ipv4_string).ip
octets = []
for _ in range(2):
octets.append(hex(ipv4_int & 0xFFFF).lstrip('0x').rstrip('L'))
ipv4_int >>= 16
fields.extend(reversed(octets))
for field in fields:
ip_int = (ip_int << 16) + int(field, 16)
return ip_int
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if not ip_int and ip_int != 0:
ip_int = self.ip
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = []
for x in range(0, 32, 4):
hextets.append('%x' % int(hex_str[x:x+4], 16))
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
    @property
    def netmask_ext(self):
        """IPv6 extended netmask.

        We don't deal with netmasks in IPv6 like we do in IPv4. This is
        here strictly for IPv4 compatibility. We simply return the
        prefix length.

        Returns:
            An integer, the prefix length.

        """
        return self.prefixlen | {
"content_hash": "8c577563f7acbcfcee2f708c2a80bc4a",
"timestamp": "",
"source": "github",
"line_count": 1346,
"max_line_length": 79,
"avg_line_length": 30.436849925705793,
"alnum_prop": 0.5462800234329233,
"repo_name": "bretlowery/snakr",
"id": "0c695e11c47c17e1ab7511a34dbd2aaabc730d2e",
"size": "41017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snakr/ipaddr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "HTML",
"bytes": "70189"
},
{
"name": "JavaScript",
"bytes": "121904"
},
{
"name": "Makefile",
"bytes": "91"
},
{
"name": "PLpgSQL",
"bytes": "20088"
},
{
"name": "Python",
"bytes": "5592645"
},
{
"name": "Shell",
"bytes": "759"
}
],
"symlink_target": ""
} |
"""CLI command definitions."""
import os
import sys
import click
from moduleultra import *
from gimme_input import *
from yaml import dump as ydump
from .daemon import daemon
# Load __version__ by exec'ing version.py into a dict, so the CLI can
# report its version without importing the package itself.
version = {}
version_path = os.path.join(os.path.dirname(__file__), '../version.py')
with open(version_path) as version_file:
    exec(version_file.read(), version)
@click.group()
@click.version_option(version['__version__'])
def main():
    """Root command group for the moduleultra CLI."""
    pass
# The daemon subcommand is defined in its own module.
main.add_command(daemon)
@main.command()
def init():
    """Initialize the global ModuleUltra config and a repo in the cwd."""
    try:
        ModuleUltraConfig.initConfig()
    except ModuleUltraConfigAlreadyExists:
        pass  # this is fine: the global config only needs to exist once
    try:
        ModuleUltraRepo.initRepo()
    except ModuleUltraRepoAlreadyExists:
        print('Repo already exists.', file=sys.stderr)
@main.group()
def config():
    """Commands that modify the global ModuleUltra configuration."""
    pass
@config.command(name='cluster_submit')
@click.argument('script')
def setSubmitScript(script):
    """Record SCRIPT as the cluster job-submission script."""
    ModuleUltraConfig.load().setClusterSubmitScript(script)
###############################################################################
@main.group()
def add():
    """Commands that attach resources to the current repo."""
    pass
@add.command(name='pipeline')
@click.option('-v', '--version', default=None, type=str)
@click.option('--modify/--no-modify', default=False)
@click.argument('name', nargs=1)
def addPipeline(version, modify, name):
    """Add the pipeline NAME to the current repo."""
    muRepo = ModuleUltraRepo.loadRepo()
    try:
        muRepo.addPipeline(name, version=version, modify=modify)
    except errors.PipelineAlreadyInRepoError:
        # Already attached; report on stderr instead of failing.
        msg = '{} is already in this repo.'.format(name)
        print(msg, file=sys.stderr)
###############################################################################
@main.command(name='install')
@click.option('--dev/--normal', default=False)
@click.argument('uri', nargs=1)
def installPipeline(uri, dev=False):
    """Install the pipeline found at URI (``--dev`` for a dev install)."""
    mu_config = ModuleUltraConfig.load()
    try:
        mu_config.installPipeline(uri, dev=dev)
    except PipelineAlreadyInstalledError:
        # Nothing to do; tell the user on stderr.
        print('Pipeline already installed.', file=sys.stderr)
@main.command(name='uninstall')
@click.option('-v', '--version', default=None, type=str)
@click.argument('name', nargs=1)
def uninstallPipeline(name, version=None):
    """Remove the installed pipeline NAME (optionally one version)."""
    ModuleUltraConfig.load().uninstallPipeline(name, version=version)
@main.command(name='reinstall')
@click.option('-v', '--version', default=None, type=str)
@click.option('--dev/--normal', default=False)
@click.argument('name', nargs=1)
@click.argument('uri', nargs=1)
def reinstallPipeline(name, uri, version=None, dev=False):
    """Uninstall pipeline NAME (if present), then install it from URI."""
    mu_config = ModuleUltraConfig.load()
    try:
        mu_config.uninstallPipeline(name, version=version)
    except KeyError:
        # The pipeline was never installed; go straight to the install.
        pass
    mu_config.installPipeline(uri, dev=dev)
###############################################################################
def _select_groups(dsRepo, choose):
    """Interactively pick sample groups; returns None for "all groups"."""
    inp = BoolUserInput('Process data from specific sample groups?', False)
    if choose and inp.resolve():
        return UserMultiChoice('What sample groups should be processed?',
                               dsRepo.db.sampleGroupTable.getAll(),
                               display=lambda x: x.name).resolve()
    return None


def _select_samples(dsRepo, sample_list, choose, groups):
    """Pick the samples to process; returns None for "all samples".

    A --sample-list file wins over interactive choice; names from the
    file that are not present in the repo are silently dropped.
    """
    if sample_list:
        requested = [line.strip() for line in sample_list if line.strip()]
        known = {sample.name for sample in dsRepo.db.sampleTable.getAll()}
        return [name for name in requested if name in known]
    inp = BoolUserInput('Process data from a specific samples?', False)
    if choose and inp.resolve():
        if groups is not None:
            # Restrict the choices to samples in the chosen groups.
            candidates = []
            for group in groups:
                candidates += group.samples()
        else:
            candidates = dsRepo.db.sampleTable.getAll()
        return UserMultiChoice('What samples should data be taken from?',
                               candidates,
                               display=lambda x: x.name).resolve()
    return None


@main.command(name='run')
@click.option('-p', '--pipeline', default=None, type=str)
@click.option('-v', '--version', default=None, type=str)
@click.option('-c', '--local-config', default=None, type=str)
@click.option('--sample-list', default=None, type=click.File('r'))
@click.option('--choose-endpts/--all-endpts', default=False)
@click.option('--choose-exclude-endpts/--no-exclude-endpts', default=False)
@click.option('--exclude-endpts', default='', type=str, help='list of comma-separated names')
@click.option('--choose/--all', default=False)
@click.option('--local/--cluster', default=True)
@click.option('--dryrun/--wetrun', default=False)
@click.option('--unlock/--no-unlock', default=False)
@click.option('--compact/--logger', default=False)
@click.option('--benchmark/--no-benchmark', default=False)
@click.option('-j', '--jobs', default=1)
def runPipe(pipeline, version, local_config, sample_list,
            choose_endpts, choose_exclude_endpts, exclude_endpts, choose,
            local, dryrun, unlock, compact, benchmark, jobs):
    """Run a pipeline over (a chosen subset of) the repo's samples."""
    repo = ModuleUltraRepo.loadRepo()
    if pipeline is None:
        pipeline = UserChoice('pipeline', repo.listPipelines()).resolve()
    pipe = repo.getPipelineInstance(pipeline, version=version)
    print('Running {} :: {}'.format(pipe.pipelineName, pipe.pipelineVersion))
    dsRepo = repo.datasuperRepo()
    # select endpoints; endpts=False means "run everything"
    endpts = False
    if choose_endpts:
        endpts = UserMultiChoice('What end points should be evaluated?',
                                 pipe.listEndpoints()).resolve()
    # Bug fix: ''.split(',') == [''], so an unset --exclude-endpts used
    # to produce a list containing one empty name. Filter empties out.
    excludedEndpts = [name for name in exclude_endpts.split(',') if name]
    if choose_exclude_endpts:
        excludedEndpts = UserMultiChoice('What end points should NOT be evaluated?',
                                         pipe.listEndpoints()).resolve()
    # select sample groups / samples (None means "all")
    groups = _select_groups(dsRepo, choose)
    samples = _select_samples(dsRepo, sample_list, choose, groups)
    # run the pipeline
    pipe.run(endpts=endpts, excludeEndpts=excludedEndpts,
             groups=groups, samples=samples, dryrun=dryrun,
             unlock=unlock, local=local, jobs=jobs,
             custom_config_file=local_config, compact_logger=compact,
             benchmark=benchmark)
###############################################################################
@main.group(name='view')
def view():
    """Commands that display pipelines and their details."""
    pass
@view.command(name='pipelines')
@click.option('--installed/--local', default=False)
def viewPipelines(installed):
    """List pipelines.

    With --installed, list every pipeline installed on this machine;
    otherwise list the pipelines attached to the current repo.
    """
    if installed:
        msg = '''
    # Showing all pipelines currently installed
    #
    # to add a pipeline to a repo navigate to the repo and run
    # moduleultra add <pipeline name>
    '''
        # Bug fix: the banner and repo listing previously sat *after*
        # the if/else, so --installed printed its banner after the
        # listing, appended the repo's pipelines, and required being
        # inside a repo. Each branch now prints its own banner first.
        print(msg)
        muConfig = ModuleUltraConfig.load()
        for pName, versions in muConfig.listInstalledPipelines().items():
            vs = ' '.join([str(el) for el in versions])
            print('{} :: {}'.format(pName, vs))
    else:
        msg = '''
    # Showing pipelines currently in this repo
    # to see all installed pipelines use '--installed' flag
    '''
        print(msg)
        repo = ModuleUltraRepo.loadRepo()
        for pName in repo.listPipelines():
            print(pName)
###############################################################################
@view.group(name='detail')
def detail():
    """Commands that show details of a single resource."""
    pass
@detail.command(name='pipeline')
@click.option('-v', '--version', default=None, type=str)
@click.argument('name', nargs=1)
def detailPipeline(version, name):
    """Print a YAML summary of a pipeline's origins and endpoints."""
    pipe = ModuleUltraRepo.loadRepo().getPipelineInstance(name, version=version)
    summary = {
        'origins': pipe.listOrigins(),
        'endpoints': pipe.listEndpoints(),
    }
    click.echo(ydump(summary))
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "47592757c228649d89911dd9250c3785",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 93,
"avg_line_length": 32.53448275862069,
"alnum_prop": 0.6157922628510863,
"repo_name": "MetaSUB/ModuleUltra",
"id": "09b43021feafc48ef17c6021d5698dfe01c113ea",
"size": "7548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moduleultra/cli/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43847"
}
],
"symlink_target": ""
} |
"""
WSGI config for truffe2 project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "app.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
# Project-specific startup hook; must run after DJANGO_SETTINGS_MODULE is
# set and before the Django application object is created.
from generic.startup import startup
startup()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "9250af4629fa04c29e652dfb431c0ef9",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 41.74285714285714,
"alnum_prop": 0.7926078028747433,
"repo_name": "ArcaniteSolutions/truffe2",
"id": "fc7ffca491eb3bdb6adf3ae7afa54024fe09c46e",
"size": "1461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "truffe2/app/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "552855"
},
{
"name": "HTML",
"bytes": "742372"
},
{
"name": "JavaScript",
"bytes": "1859724"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3048852"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
from django.utils.timezone import now
import jsonfield.fields
import taggit.managers
def create_system_job_templates(apps, schema_editor):
    """Create default system job templates if not present.

    Default schedules are created only when the corresponding system job
    template was newly created (i.e. on a fresh database), so existing
    installations keep any customized schedules.
    """
    SystemJobTemplate = apps.get_model('main', 'SystemJobTemplate')
    Schedule = apps.get_model('main', 'Schedule')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    sjt_ct = ContentType.objects.get_for_model(SystemJobTemplate)
    now_dt = now()
    now_str = now_dt.strftime('%Y%m%dT%H%M%SZ')

    def ensure_template(job_type, name, description,
                        sched_name, rrule, extra_data):
        # Get or create one system job template; attach its default
        # schedule only when the template itself was just created.
        sjt, created = SystemJobTemplate.objects.get_or_create(
            job_type=job_type,
            defaults=dict(
                name=name,
                description=description,
                created=now_dt,
                modified=now_dt,
                polymorphic_ctype=sjt_ct,
            ),
        )
        if created:
            sched = Schedule(
                name=sched_name,
                rrule=rrule,
                description='Automatically Generated Schedule',
                enabled=True,
                extra_data=extra_data,
                created=now_dt,
                modified=now_dt,
            )
            sched.unified_job_template = sjt
            sched.save()

    ensure_template(
        'cleanup_jobs', 'Cleanup Job Details', 'Remove job history',
        'Cleanup Job Schedule',
        'DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU' % now_str,
        {'days': '120'},
    )

    # Drop the obsolete 'cleanup_deleted' job type, along with any
    # schedules that pointed at it.
    existing_cd_jobs = SystemJobTemplate.objects.filter(job_type='cleanup_deleted')
    Schedule.objects.filter(unified_job_template__in=existing_cd_jobs).delete()
    existing_cd_jobs.delete()

    ensure_template(
        'cleanup_activitystream', 'Cleanup Activity Stream',
        'Remove activity stream history',
        'Cleanup Activity Schedule',
        'DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=TU' % now_str,
        {'days': '355'},
    )

    ensure_template(
        'cleanup_facts', 'Cleanup Fact Details',
        'Remove system tracking history',
        'Cleanup Fact Schedule',
        'DTSTART:%s RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=1' % now_str,
        {'older_than': '120d', 'granularity': '1w'},
    )
class Migration(migrations.Migration):
replaces = [('main', '0002_v300_tower_settings_changes'),
('main', '0003_v300_notification_changes'),
('main', '0004_v300_fact_changes'),
('main', '0005_v300_migrate_facts'),
('main', '0006_v300_active_flag_cleanup'),
('main', '0007_v300_active_flag_removal'),
('main', '0008_v300_rbac_changes'),
('main', '0009_v300_rbac_migrations'),
('main', '0010_v300_create_system_job_templates'),
('main', '0011_v300_credential_domain_field'),
('main', '0012_v300_create_labels'),
('main', '0013_v300_label_changes'),
('main', '0014_v300_invsource_cred'),
('main', '0015_v300_label_changes'),
('main', '0016_v300_prompting_changes'),
('main', '0017_v300_prompting_migrations'),
('main', '0018_v300_host_ordering'),
('main', '0019_v300_new_azure_credential'),]
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
# Tower settings changes
migrations.CreateModel(
name='TowerSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('key', models.CharField(unique=True, max_length=255)),
('description', models.TextField()),
('category', models.CharField(max_length=128)),
('value', models.TextField(blank=True)),
('value_type', models.CharField(max_length=12, choices=[('string', 'String'), ('int', 'Integer'), ('float', 'Decimal'), ('json', 'JSON'), ('bool', 'Boolean'), ('password', 'Password'), ('list', 'List')])),
('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)),
],
),
# Notification changes
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('status', models.CharField(default='pending', max_length=20, editable=False, choices=[('pending', 'Pending'), ('successful', 'Successful'), ('failed', 'Failed')])),
('error', models.TextField(default='', editable=False, blank=True)),
('notifications_sent', models.IntegerField(default=0, editable=False)),
('notification_type', models.CharField(max_length=32, choices=[('email', 'Email'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('pagerduty', 'Pagerduty'), ('hipchat', 'HipChat'), ('webhook', 'Webhook'), ('mattermost', 'Mattermost'), ('rocketchat', 'Rocket.Chat'), ('irc', 'IRC')])),
('recipients', models.TextField(default='', editable=False, blank=True)),
('subject', models.TextField(default='', editable=False, blank=True)),
('body', jsonfield.fields.JSONField(default=dict, blank=True)),
],
options={
'ordering': ('pk',),
},
),
migrations.CreateModel(
name='NotificationTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('description', models.TextField(default='', blank=True)),
('name', models.CharField(unique=True, max_length=512)),
('notification_type', models.CharField(max_length=32, choices=[('email', 'Email'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('pagerduty', 'Pagerduty'), ('hipchat', 'HipChat'), ('webhook', 'Webhook'), ('mattermost', 'Mattermost'), ('rocketchat', 'Rocket.Chat'), ('irc', 'IRC')])),
('notification_configuration', jsonfield.fields.JSONField(default=dict)),
('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('organization', models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
),
migrations.AddField(
model_name='notification',
name='notification_template',
field=models.ForeignKey(related_name='notifications', editable=False, on_delete=models.CASCADE, to='main.NotificationTemplate'),
),
migrations.AddField(
model_name='activitystream',
name='notification',
field=models.ManyToManyField(to='main.Notification', blank=True),
),
migrations.AddField(
model_name='activitystream',
name='notification_template',
field=models.ManyToManyField(to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_any',
field=models.ManyToManyField(related_name='organization_notification_templates_for_any', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_error',
field=models.ManyToManyField(related_name='organization_notification_templates_for_errors', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_success',
field=models.ManyToManyField(related_name='organization_notification_templates_for_success', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjob',
name='notifications',
field=models.ManyToManyField(related_name='unifiedjob_notifications', editable=False, to='main.Notification'),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_any',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_any', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_error',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_errors', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_success',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_success', to='main.NotificationTemplate', blank=True),
),
# Fact changes
migrations.CreateModel(
name='Fact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(default=None, help_text='Date and time of the corresponding fact scan gathering time.', editable=False)),
('module', models.CharField(max_length=128)),
('facts', awx.main.fields.JSONBField(default=dict, help_text='Arbitrary JSON structure of module facts captured at timestamp for a single host.', blank=True)),
('host', models.ForeignKey(related_name='facts', to='main.Host', on_delete=models.CASCADE, help_text='Host for the facts that the fact scan captured.')),
],
),
migrations.AlterIndexTogether(
name='fact',
index_together=set([('timestamp', 'module', 'host')]),
),
# Active flag removal
migrations.RemoveField(
model_name='credential',
name='active',
),
migrations.RemoveField(
model_name='custominventoryscript',
name='active',
),
migrations.RemoveField(
model_name='group',
name='active',
),
migrations.RemoveField(
model_name='host',
name='active',
),
migrations.RemoveField(
model_name='inventory',
name='active',
),
migrations.RemoveField(
model_name='organization',
name='active',
),
migrations.RemoveField(
model_name='permission',
name='active',
),
migrations.RemoveField(
model_name='schedule',
name='active',
),
migrations.RemoveField(
model_name='team',
name='active',
),
migrations.RemoveField(
model_name='unifiedjob',
name='active',
),
migrations.RemoveField(
model_name='unifiedjobtemplate',
name='active',
),
# RBAC Changes
# ############
migrations.RenameField(
'Organization',
'admins',
'deprecated_admins',
),
migrations.RenameField(
'Organization',
'users',
'deprecated_users',
),
migrations.RenameField(
'Team',
'users',
'deprecated_users',
),
migrations.RenameField(
'Team',
'projects',
'deprecated_projects',
),
migrations.AddField(
model_name='project',
name='organization',
field=models.ForeignKey(related_name='projects', to='main.Organization', on_delete=models.CASCADE, blank=True, null=True),
),
migrations.AlterField(
model_name='team',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True),
),
migrations.RenameField(
model_name='organization',
old_name='projects',
new_name='deprecated_projects',
),
migrations.AlterField(
model_name='organization',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_organizations', to='main.Project', blank=True),
),
migrations.RenameField(
'Credential',
'team',
'deprecated_team',
),
migrations.RenameField(
'Credential',
'user',
'deprecated_user',
),
migrations.AlterField(
model_name='organization',
name='deprecated_admins',
field=models.ManyToManyField(related_name='deprecated_admin_of_organizations', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterField(
model_name='organization',
name='deprecated_users',
field=models.ManyToManyField(related_name='deprecated_organizations', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterField(
model_name='team',
name='deprecated_users',
field=models.ManyToManyField(related_name='deprecated_teams', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterUniqueTogether(
name='credential',
unique_together=set([]),
),
migrations.AddField(
model_name='credential',
name='organization',
field=models.ForeignKey(related_name='credentials', on_delete=models.CASCADE, default=None, blank=True, to='main.Organization', null=True),
),
#
# New RBAC models and fields
#
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_field', models.TextField()),
('singleton_name', models.TextField(default=None, unique=True, null=True, db_index=True)),
('members', models.ManyToManyField(related_name='roles', to=settings.AUTH_USER_MODEL)),
('parents', models.ManyToManyField(related_name='children', to='main.Role')),
('implicit_parents', models.TextField(default='[]')),
('content_type', models.ForeignKey(default=None, to='contenttypes.ContentType', on_delete=models.CASCADE, null=True)),
('object_id', models.PositiveIntegerField(default=None, null=True)),
],
options={
'db_table': 'main_rbac_roles',
'verbose_name_plural': 'roles',
},
),
migrations.CreateModel(
name='RoleAncestorEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_field', models.TextField()),
('content_type_id', models.PositiveIntegerField()),
('object_id', models.PositiveIntegerField()),
('ancestor', models.ForeignKey(on_delete=models.CASCADE, related_name='+', to='main.Role')),
('descendent', models.ForeignKey(on_delete=models.CASCADE, related_name='+', to='main.Role')),
],
options={
'db_table': 'main_rbac_role_ancestors',
'verbose_name_plural': 'role_ancestors',
},
),
migrations.AddField(
model_name='role',
name='ancestors',
field=models.ManyToManyField(related_name='descendents', through='main.RoleAncestorEntry', to='main.Role'),
),
migrations.AlterIndexTogether(
name='role',
index_together=set([('content_type', 'object_id')]),
),
migrations.AlterIndexTogether(
name='roleancestorentry',
index_together=set([('ancestor', 'content_type_id', 'object_id'), ('ancestor', 'content_type_id', 'role_field'), ('ancestor', 'descendent')]),
),
migrations.AddField(
model_name='credential',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['singleton:system_administrator'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='credential',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='credential',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['singleton:system_auditor', 'organization.auditor_role', 'use_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='custominventoryscript',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='custominventoryscript',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'organization.member_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='adhoc_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='update_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='adhoc_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'update_role', 'use_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['project.organization.admin_role', 'inventory.organization.admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='execute_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='singleton:system_administrator', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='auditor_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='singleton:system_auditor', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='member_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['member_role', 'auditor_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.admin_role', 'singleton:system_administrator'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='update_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'singleton:system_auditor', 'use_role', 'update_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='member_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=None, to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role', 'organization.auditor_role', 'member_role'], to='main.Role', null='True'),
),
# System Job Templates
migrations.RunPython(create_system_job_templates, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(default='', max_length=32, blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(default='', max_length=32, blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]),
),
# Credential domain field
migrations.AddField(
model_name='credential',
name='domain',
field=models.CharField(default='', help_text='The identifier for the domain.', max_length=100, verbose_name='Domain', blank=True),
),
# Create Labels
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('description', models.TextField(default='', blank=True)),
('name', models.CharField(max_length=512)),
('created_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('organization', models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, to='main.Organization', help_text='Organization this label belongs to.')),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
options={
'ordering': ('organization', 'name'),
},
),
migrations.AddField(
model_name='activitystream',
name='label',
field=models.ManyToManyField(to='main.Label', blank=True),
),
migrations.AddField(
model_name='job',
name='labels',
field=models.ManyToManyField(related_name='job_labels', to='main.Label', blank=True),
),
migrations.AddField(
model_name='jobtemplate',
name='labels',
field=models.ManyToManyField(related_name='jobtemplate_labels', to='main.Label', blank=True),
),
migrations.AlterUniqueTogether(
name='label',
unique_together=set([('name', 'organization')]),
),
# Label changes
migrations.AlterField(
model_name='label',
name='organization',
field=models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, default=None, blank=True, to='main.Organization', help_text='Organization this label belongs to.', null=True),
),
migrations.AlterField(
model_name='label',
name='organization',
field=models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, to='main.Organization', help_text='Organization this label belongs to.'),
),
# InventorySource Credential
migrations.AddField(
model_name='job',
name='network_credential',
field=models.ForeignKey(related_name='jobs_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True),
),
migrations.AddField(
model_name='jobtemplate',
name='network_credential',
field=models.ForeignKey(related_name='jobtemplates_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True),
),
migrations.AddField(
model_name='credential',
name='authorize',
field=models.BooleanField(default=False, help_text='Whether to use the authorize mechanism.'),
),
migrations.AddField(
model_name='credential',
name='authorize_password',
field=models.CharField(default='', help_text='Password used by the authorize mechanism.', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='credential',
name='deprecated_team',
field=models.ForeignKey(related_name='deprecated_credentials', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Team', null=True),
),
migrations.AlterField(
model_name='credential',
name='deprecated_user',
field=models.ForeignKey(related_name='deprecated_credentials', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AlterField(
model_name='credential',
name='kind',
field=models.CharField(default='ssh', max_length=32, choices=[('ssh', 'Machine'), ('net', 'Network'), ('scm', 'Source Control'), ('aws', 'Amazon Web Services'), ('rax', 'Rackspace'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('openstack', 'OpenStack')]),
),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='team',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True),
),
# Prompting changes
migrations.AddField(
model_name='jobtemplate',
name='ask_limit_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_inventory_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_credential_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_job_type_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_tags_on_launch',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='job',
name='inventory',
field=models.ForeignKey(related_name='jobs', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
),
migrations.AlterField(
model_name='jobtemplate',
name='inventory',
field=models.ForeignKey(related_name='jobtemplates', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
),
# Host ordering
migrations.AlterModelOptions(
name='host',
options={'ordering': ('name',)},
),
# New Azure credential
migrations.AddField(
model_name='credential',
name='client',
field=models.CharField(default='', help_text='Client Id or Application Id for the credential', max_length=128, blank=True),
),
migrations.AddField(
model_name='credential',
name='secret',
field=models.CharField(default='', help_text='Secret Token for this credential', max_length=1024, blank=True),
),
migrations.AddField(
model_name='credential',
name='subscription',
field=models.CharField(default='', help_text='Subscription identifier for this credential', max_length=1024, blank=True),
),
migrations.AddField(
model_name='credential',
name='tenant',
field=models.CharField(default='', help_text='Tenant identifier for this credential', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='credential',
name='kind',
field=models.CharField(default='ssh', max_length=32, choices=[('ssh', 'Machine'), ('net', 'Network'), ('scm', 'Source Control'), ('aws', 'Amazon Web Services'), ('rax', 'Rackspace'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('openstack', 'OpenStack')]),
),
migrations.AlterField(
model_name='host',
name='instance_id',
field=models.CharField(default='', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
]
| {
"content_hash": "642416686c93a1415ea22f4734d7cd60",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 487,
"avg_line_length": 50.48641304347826,
"alnum_prop": 0.588406265138059,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "89fce679eae41c6f3cb4bf6b9b95344d9f168713",
"size": "37242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/migrations/0002_squashed_v300_release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import json
from .api_test_base import ApiTestBase
class TestAPIBNFCodeViews(ApiTestBase):
api_prefix = "/api/1.0"
def assertNotJson(self, content):
try:
json.loads(content)
raise AssertionError("Expected %s... to be non-JSON" % content[:10])
except ValueError:
pass
    def assertJson(self, content):
        """Fail the test unless *content* parses successfully as JSON."""
        try:
            json.loads(content)
        except ValueError:
            raise AssertionError("Expected %s... to be JSON" % content[:10])
def test_header_and_query_string_json_negotiation(self):
url = "%s/bnf_code?q=lor&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertJson(response.content)
url = "%s/bnf_code?q=lor&format=json" % self.api_prefix
response = self.client.get(url, {}, follow=True, HTTP_ACCEPT="text/html")
self.assertJson(response.content)
url = "%s/bnf_code?q=lor" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertNotJson(response.content)
url = "%s/bnf_code?q=lor" % self.api_prefix
response = self.client.get(url, {}, follow=True, HTTP_ACCEPT="application/json")
self.assertNotJson(response.content)
def test_api_view_bnf_chemical(self):
url = "%s/bnf_code?q=lor&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 5)
self.assertEqual(content[0]["id"], "0202010D0")
self.assertEqual(content[0]["name"], "Chlorothiazide")
self.assertEqual(content[0]["type"], "chemical")
self.assertEqual(content[3]["id"], "0202010D0AA")
self.assertEqual(content[3]["name"], "Chloroth")
self.assertEqual(content[3]["type"], "product")
url = "%s/bnf_code?q=0202010D0BD&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "0202010D0BD")
self.assertEqual(content[0]["name"], "Chlotride")
self.assertEqual(content[0]["is_generic"], False)
url = "%s/bnf_code?q=0202010D0bd&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "0202010D0BD")
self.assertEqual(content[0]["name"], "Chlotride")
self.assertEqual(content[0]["is_generic"], False)
def test_inactive_chemical(self):
url = "%s/bnf_code?q=0204ZZZZZ&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 0)
def test_api_view_bnf_section(self):
url = "%s/bnf_code?q=diuretics&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 2)
self.assertEqual(content[0]["id"], "2.2")
self.assertEqual(content[0]["name"], "Diuretics")
self.assertEqual(content[0]["type"], "BNF section")
self.assertEqual(content[1]["id"], "2.2.1")
self.assertEqual(content[1]["name"], "Thiazides And Related Diuretics")
self.assertEqual(content[1]["type"], "BNF paragraph")
url = "%s/bnf_code?q=cardio&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "2")
self.assertEqual(content[0]["name"], "Cardiovascular System")
self.assertEqual(content[0]["type"], "BNF chapter")
url = "%s/bnf_code?q=2.2&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "2.2")
self.assertEqual(content[0]["name"], "Diuretics")
self.assertEqual(content[0]["type"], "BNF section")
url = "%s/bnf_code?q=0202&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "2.2")
self.assertEqual(content[0]["name"], "Diuretics")
self.assertEqual(content[0]["type"], "BNF section")
def test_inactive_section(self):
url = "%s/bnf_code?q=5.99&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 0)
def test_api_view_bnf_presentation(self):
url = "%s/bnf_code?q=Bendroflume&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 3)
self.assertEqual(content[0]["id"], "0202010B0")
self.assertEqual(content[0]["name"], "Bendroflumethiazide")
url = "%s/bnf_code?q=0202010F0AAAAAA&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
self.assertEqual(content[0]["id"], "0202010F0AAAAAA")
self.assertEqual(content[0]["name"], "Chlortalidone_Tab 50mg")
self.assertEqual(content[0]["type"], "product format")
url = "%s/bnf_code?q=0202010F0AAA&exact=true&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 0)
def test_inactive_presentation(self):
url = "%s/bnf_code?q=non-current+product&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 0)
def test_api_view_bnf_presentation_replacements(self):
url = "%s/bnf_code?q=Labetalol+50&format=json" % self.api_prefix
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(len(content), 1)
| {
"content_hash": "c734c62abbcb1850079045f1c1f3998a",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 88,
"avg_line_length": 45.3125,
"alnum_prop": 0.6391724137931034,
"repo_name": "annapowellsmith/openpresc",
"id": "6f735ce802b171883e0fc2135dc8edf571aa78a1",
"size": "7250",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "openprescribing/frontend/tests/test_api_bnf_codes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95907"
},
{
"name": "HTML",
"bytes": "68653"
},
{
"name": "JavaScript",
"bytes": "14332669"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "352287"
},
{
"name": "Shell",
"bytes": "3537"
}
],
"symlink_target": ""
} |
'''
pi@raspberrypi ~ $ echo $LANG
zh_TW.UTF-8
https://github.com/ashtons/picam
url http://host:port/s/foo_webapp.html
'''
import settings
import picam
import logging,threading
import datetime,time
import Image
import httplib, urllib
import collections,array
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
# Epoch time (seconds) of the most recent motion event; 0 means "no event yet".
# (Original note said "False when test" -- presumably set falsy while testing;
# TODO confirm.)
lastEvtTime = 0
class WSHandler(tornado.websocket.WebSocketHandler):
connections = set()
lock = threading.Lock()
def open(self):
print 'New connection was opened'
#self.write_message("Welcome to my websocket!")
self.lock.acquire()
try:
self.connections.add(self)
finally:
self.lock.release()
def on_message(self, message):
print 'Incoming message:', message
#self.write_message("You said: " + message)
def on_close(self):
print 'Connection was closed...'
self.lock.acquire()
try:
self.connections.remove(self)
finally:
self.lock.release()
@classmethod
def wsSend(cls,msg):
#logging.debug("sending message %s" %msg)
cls.lock.acquire()
try:
for conn in cls.connections:
try:
conn.write_message(msg)
except:
logging.error("Error sending message",exc_info=True)
finally:
cls.lock.release()
class MainHandler(tornado.web.RequestHandler):
    """Minimal HTTP handler: answers '/' with a fixed greeting (liveness check)."""

    def get(self):
        greeting = "Hello Test"
        self.write(greeting)
# URL routes: the websocket feed, the plain liveness page at '/', and the
# static assets served from settings.WWW under /s/.
application = tornado.web.Application([
    (r'/ws', WSHandler),(r'/',MainHandler),
    (r'/s/(.*)', tornado.web.StaticFileHandler, {'path': settings.WWW}),
])
def pushoverPost(msg):
    """POST *msg* to the Pushover API.

    When settings.PUSHOVER_ENABLE is false only a log line is emitted, so
    tests do not hit the network.
    """
    if not settings.PUSHOVER_ENABLE:
        logging.info('[TestPrintOnly]Send pushover event')
        return

    payload = urllib.urlencode({
        "token": settings.PUSHOVER_APPTOKEN,
        "user": settings.PUSHOVER_USERKEY,
        "message": msg,
    })
    headers = {"Content-type": "application/x-www-form-urlencoded"}

    connection = httplib.HTTPSConnection("api.pushover.net:443")
    connection.request("POST", "/1/messages.json", payload, headers)
    logging.info('HTTP POST Send %s' % msg)

    response = connection.getresponse()
    logging.info("HTTP POST status=%d , reason=%s", response.status, response.reason)
    logging.info(response.read())
    connection.close()
def found(q):
    """Record a motion event and push a notification in a background thread.

    *q* is the changed-pixel count reported for the triggering frame.
    """
    global lastEvtTime
    lastEvtTime = time.time()
    logging.info("EVENT FOUND")
    message = 'ๆๅฎถF้ Event px=%d' % q
    # Run the HTTP POST off-thread so the camera loop is not blocked.
    worker = threading.Thread(target=pushoverPost, args=(message,))
    worker.start()
def initLog():
    """Configure DEBUG logging to a timestamped file and mirror it to stderr."""
    date_tag = datetime.datetime.now().strftime("%Y%b%d_%H%M%S")
    logging.basicConfig(
        filename="mt_%s.log" % date_tag,
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s')

    # Console handler with a slightly different, more compact layout.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(
        logging.Formatter('%(asctime)s : %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)

    logging.info('Started')
def isMotion4(kl):
    """Return True when the last four diff samples look like motion.

    The pattern is: a jump of more than 777 changed pixels between the first
    two samples, followed by two samples each above 1000 pixels.
    """
    if len(kl) != 4:
        return False
    jump = kl[1] - kl[0]
    return jump > 777 and kl[2] > 1000 and kl[3] > 1000
def handleMotion(k, q):
    """Fire found() when *k* (recent diff samples) matches the motion pattern
    and at least 300 seconds have passed since the last reported event."""
    if not isMotion4(k):
        return
    elapsed = time.time() - lastEvtTime
    logging.debug("EvtTimeDiff=%d" % elapsed)
    if elapsed > 300:
        found(q)
def startTornado():
    """Bind the HTTP server to settings.PORT and run the IOLoop (blocking)."""
    server = tornado.httpserver.HTTPServer(application)
    server.listen(settings.PORT)
    tornado.ioloop.IOLoop.instance().start()
def stopTornado():
    """Stop the global Tornado IOLoop (called on KeyboardInterrupt/SystemExit)."""
    tornado.ioloop.IOLoop.instance().stop()
def main():
    """Start the web server on a background thread and run the camera loop.

    On Ctrl-C (or SystemExit) the Tornado IOLoop is stopped before the
    exception propagates, so the process can exit cleanly.
    """
    initLog()
    # Original code bound Thread(...).start()'s return value (always None) to
    # an unused variable; create and start the thread explicitly instead.
    server_thread = threading.Thread(target=startTornado)
    server_thread.start()
    try:
        runDiffCheck()
    except (KeyboardInterrupt, SystemExit):
        stopTornado()
        raise
def runDiffCheck():
    """Poll the camera forever, streaming per-frame diff counts and
    detecting motion.

    Each iteration compares the new frame against the previous one, pushes
    the changed-pixel count to all websocket clients, drives the LED, and
    feeds the last four counts to handleMotion().
    """
    recent_counts = collections.deque(maxlen=4)  # sliding window for isMotion4
    width = 100
    height = 100
    THRESHOLD = 15     # per-pixel difference threshold
    QUANITY_MIN = 50   # changed-pixel count that lights the LED
    prev_frame = picam.takeRGBPhotoWithDetails(width, height)
    while True:
        frame = picam.takeRGBPhotoWithDetails(width, height)
        (_, q) = picam.difference(prev_frame, frame, THRESHOLD)
        if q > 10:
            logging.debug("px=%d", q)
        recent_counts.append(q)
        WSHandler.wsSend(str(q))
        # Was a conditional *expression* used purely for its side effects
        # (`picam.LEDOn() if ... else picam.LEDOff()`); a plain if/else is
        # the idiomatic form for side-effecting branches.
        if q > QUANITY_MIN:
            picam.LEDOn()
        else:
            picam.LEDOff()
        handleMotion(recent_counts, q)
        prev_frame = frame
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "06ff7d7bcfa7dfefbb4c946d8baa5cf0",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 78,
"avg_line_length": 28.0188679245283,
"alnum_prop": 0.6338945005611673,
"repo_name": "y12studio/pi",
"id": "e54fe3a32c8d84eac2a00bc7bef26fb6f68f49d8",
"size": "4476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testonly/motion_pushover/motion_pushover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1331"
},
{
"name": "C++",
"bytes": "24503"
},
{
"name": "CSS",
"bytes": "2097"
},
{
"name": "Dart",
"bytes": "13291"
},
{
"name": "JavaScript",
"bytes": "246430"
},
{
"name": "Python",
"bytes": "89283"
},
{
"name": "Shell",
"bytes": "694"
}
],
"symlink_target": ""
} |
"""
Tensorflow vs. e.g. oracle, train or no, that kind of stuff
May want to refactor name
"""
from experiment_construction.learner_construction.dummy_learner import DummyLearner
from experiment_construction.learner_construction.tensorflow_learner import TensorflowModel
from experiment_construction.learner_construction.validation_set_evaluator import ValidationSetEvaluator
class LearnerFactory:
    """Assembles a fully configured learner from its component sub-factories."""

    def __init__(self, evaluator_factory, preprocessor_factory, candidate_generator_factory, candidate_selector_factory, example_processor_factory, index_factory):
        self.evaluator_factory = evaluator_factory
        self.preprocessor_factory = preprocessor_factory
        self.candidate_generator_factory = candidate_generator_factory
        self.candidate_selector_factory = candidate_selector_factory
        self.example_processor_factory = example_processor_factory
        self.index_factory = index_factory

    def construct_learner(self, settings):
        """Build a learner for *settings*, wiring in all components.

        If the training settings request early stopping or periodic testing,
        the learner is wrapped in a ValidationSetEvaluator. All training
        settings are then copied onto the (possibly wrapped) learner.
        """
        # Construct every component up front, in the same order as before.
        preprocessor = self.preprocessor_factory.construct_preprocessor(settings)
        generator = self.candidate_generator_factory.construct_candidate_generator(settings)
        selector = self.candidate_selector_factory.construct_candidate_selector(settings)
        processor = self.example_processor_factory.construct_example_processor(settings)
        indexes = self.index_factory.construct_indexes(settings)

        learner = self.get_base_learner(selector, settings)
        learner.set_preprocessor(preprocessor)
        learner.set_candidate_generator(generator)
        learner.set_candidate_selector(selector)
        learner.set_example_processor(processor)
        learner.set_relation_indexer(indexes.relation_indexer)

        training_settings = settings["training"]
        wants_validation = (
            "early_stopping" in training_settings
            or "epochs_between_tests" in training_settings
        )
        if wants_validation:
            evaluator = self.evaluator_factory.construct_evaluator(settings, "valid_file")
            learner = ValidationSetEvaluator(learner, evaluator)

        for key, value in training_settings.items():
            learner.update_setting(key, value)
        return learner

    def get_base_learner(self, candidate_selector, settings):
        """Pick the backing model: TensorFlow when the selector requires it,
        otherwise a dummy learner driven by the validation file."""
        if not candidate_selector.is_tensorflow:
            return DummyLearner(settings["dataset"]["valid_file"], settings["endpoint"]["prefix"])
        return TensorflowModel()
"content_hash": "5e3f299334ef7f9d6f1887286f986e40",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 163,
"avg_line_length": 50.020833333333336,
"alnum_prop": 0.7396917950853811,
"repo_name": "MichSchli/QuestionAnsweringGCN",
"id": "cb126091a36ed1835a79bc76d47387d9320fc84f",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_version/experiment_construction/learner_construction/learner_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "730851"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
} |
"""Unit tests for model_activation.py."""
import unittest
import numpy
from gewittergefahr.deep_learning import model_activation
# The following constants are used to test get_hilo_activation_examples.

# Activation value for each of the 15 storm objects.
STORM_ACTIVATIONS = numpy.array([
    3.0, -0.2, 0.6, -2.3, 4.3, -0.2, -1.3, -2.1, 0.0, 0.3, 1.1, -1.2, 2.5, -1.2,
    -1.5
])

# Storm-cell ID for each object; repeated IDs exercise `unique_storm_cells`.
FULL_STORM_ID_STRINGS = [
    'a', 'd', 'b', 'c', 'a', 'b', 'c', 'd', 'c', 'b', 'c', 'd', 'd', 'b', 'd'
]

NUM_LOW_ACTIVATIONS_FEW = 4
NUM_HIGH_ACTIVATIONS_FEW = 3
# The "MANY" counts exceed the number of available examples.
NUM_LOW_ACTIVATIONS_MANY = 16
NUM_HIGH_ACTIVATIONS_MANY = 10

# Expected indices into STORM_ACTIVATIONS, ordered most-extreme first.
LOW_INDICES_FEW = numpy.array([3, 7, 14, 6], dtype=int)
LOW_INDICES_MANY = numpy.array(
    [3, 7, 14, 6, 11, 13, 1, 5, 8, 9, 2, 10, 12, 0, 4], dtype=int
)

# "_UNIQUE" arrays keep only the most extreme example per storm cell.
LOW_INDICES_FEW_UNIQUE = numpy.array([3, 7, 13, 0], dtype=int)
LOW_INDICES_MANY_UNIQUE = numpy.array([3, 7, 13, 0], dtype=int)

HIGH_INDICES_FEW = numpy.array([4, 0, 12], dtype=int)
HIGH_INDICES_MANY = numpy.array([4, 0, 12, 10, 2, 9, 8, 1, 5, 11], dtype=int)
HIGH_INDICES_FEW_UNIQUE = numpy.array([4, 12, 10], dtype=int)
HIGH_INDICES_MANY_UNIQUE = numpy.array([4, 12, 10, 2], dtype=int)

# The following constants are used to test get_contingency_table_extremes.

# Binary label (1 = event observed) for each storm object.
STORM_TARGET_VALUES = numpy.array(
    [1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0], dtype=int
)

NUM_HITS_FEW = 2
NUM_MISSES_FEW = 3
NUM_FALSE_ALARMS_FEW = 4
NUM_CORRECT_NULLS_FEW = 5
NUM_HITS_MANY = 8
NUM_MISSES_MANY = 9
NUM_FALSE_ALARMS_MANY = 10
NUM_CORRECT_NULLS_MANY = 11

HIT_INDICES_FEW = numpy.array([4, 0], dtype=int)
MISS_INDICES_FEW = numpy.array([6, 11, 13], dtype=int)
FALSE_ALARM_INDICES_FEW = numpy.array([12, 8, 1, 5], dtype=int)
CORRECT_NULL_INDICES_FEW = numpy.array([3, 7, 14, 1, 5], dtype=int)
HIT_INDICES_FEW_UNIQUE = numpy.array([4, 10], dtype=int)
MISS_INDICES_FEW_UNIQUE = numpy.array([6, 11, 13], dtype=int)
FALSE_ALARM_INDICES_FEW_UNIQUE = numpy.array([12, 8, 5], dtype=int)
CORRECT_NULL_INDICES_FEW_UNIQUE = numpy.array([3, 7, 5], dtype=int)
HIT_INDICES_MANY = numpy.array([4, 0, 10, 2, 9, 11, 13, 6], dtype=int)
MISS_INDICES_MANY = numpy.array([6, 11, 13, 9, 2, 10, 0, 4], dtype=int)
FALSE_ALARM_INDICES_MANY = numpy.array([12, 8, 1, 5, 14, 7, 3], dtype=int)
CORRECT_NULL_INDICES_MANY = numpy.array([3, 7, 14, 1, 5, 8, 12], dtype=int)
HIT_INDICES_MANY_UNIQUE = numpy.array([4, 10, 2, 11], dtype=int)
MISS_INDICES_MANY_UNIQUE = numpy.array([6, 11, 13, 0], dtype=int)
FALSE_ALARM_INDICES_MANY_UNIQUE = numpy.array([12, 8, 5], dtype=int)
CORRECT_NULL_INDICES_MANY_UNIQUE = numpy.array([3, 7, 5], dtype=int)
class ModelActivationTests(unittest.TestCase):
    """Unit tests for model_activation.py."""

    def _check_hilo(self, num_low, num_high, unique, expected_high_indices,
                    expected_low_indices):
        """Runs get_hilo_activation_examples and verifies both index arrays."""
        kwargs = {
            'storm_activations': STORM_ACTIVATIONS,
            'num_low_activation_examples': num_low,
            'num_high_activation_examples': num_high,
            'unique_storm_cells': unique
        }
        if unique:
            kwargs['full_storm_id_strings'] = FULL_STORM_ID_STRINGS

        these_high_indices, these_low_indices = (
            model_activation.get_hilo_activation_examples(**kwargs)
        )

        self.assertTrue(numpy.array_equal(
            these_high_indices, expected_high_indices
        ))
        self.assertTrue(numpy.array_equal(
            these_low_indices, expected_low_indices
        ))

    def _check_contingency_table(self, num_hits, num_misses, num_false_alarms,
                                 num_correct_nulls, unique,
                                 expected_index_dict):
        """Runs get_contingency_table_extremes and verifies all index arrays.

        `expected_index_dict` maps each result key to its expected array.
        """
        kwargs = {
            'storm_activations': STORM_ACTIVATIONS,
            'storm_target_values': STORM_TARGET_VALUES,
            'num_hits': num_hits,
            'num_misses': num_misses,
            'num_false_alarms': num_false_alarms,
            'num_correct_nulls': num_correct_nulls,
            'unique_storm_cells': unique
        }
        if unique:
            kwargs['full_storm_id_strings'] = FULL_STORM_ID_STRINGS

        this_dict = model_activation.get_contingency_table_extremes(**kwargs)

        for this_key, these_expected_indices in expected_index_dict.items():
            self.assertTrue(numpy.array_equal(
                this_dict[this_key], these_expected_indices
            ))

    def test_get_hilo_activations_few(self):
        """Few examples, duplicate storm cells allowed."""
        self._check_hilo(
            NUM_LOW_ACTIVATIONS_FEW, NUM_HIGH_ACTIVATIONS_FEW, False,
            HIGH_INDICES_FEW, LOW_INDICES_FEW)

    def test_get_hilo_activations_few_unique(self):
        """Few examples, one per storm cell."""
        self._check_hilo(
            NUM_LOW_ACTIVATIONS_FEW, NUM_HIGH_ACTIVATIONS_FEW, True,
            HIGH_INDICES_FEW_UNIQUE, LOW_INDICES_FEW_UNIQUE)

    def test_get_hilo_activations_many(self):
        """Many examples, duplicate storm cells allowed."""
        self._check_hilo(
            NUM_LOW_ACTIVATIONS_MANY, NUM_HIGH_ACTIVATIONS_MANY, False,
            HIGH_INDICES_MANY, LOW_INDICES_MANY)

    def test_get_hilo_activations_many_unique(self):
        """Many examples, one per storm cell."""
        self._check_hilo(
            NUM_LOW_ACTIVATIONS_MANY, NUM_HIGH_ACTIVATIONS_MANY, True,
            HIGH_INDICES_MANY_UNIQUE, LOW_INDICES_MANY_UNIQUE)

    def test_get_ct_extremes_few(self):
        """Few contingency-table examples, duplicates allowed."""
        self._check_contingency_table(
            NUM_HITS_FEW, NUM_MISSES_FEW, NUM_FALSE_ALARMS_FEW,
            NUM_CORRECT_NULLS_FEW, False, {
                model_activation.HIT_INDICES_KEY: HIT_INDICES_FEW,
                model_activation.MISS_INDICES_KEY: MISS_INDICES_FEW,
                model_activation.FALSE_ALARM_INDICES_KEY:
                    FALSE_ALARM_INDICES_FEW,
                model_activation.CORRECT_NULL_INDICES_KEY:
                    CORRECT_NULL_INDICES_FEW
            })

    def test_get_ct_extremes_few_unique(self):
        """Few contingency-table examples, one per storm cell."""
        self._check_contingency_table(
            NUM_HITS_FEW, NUM_MISSES_FEW, NUM_FALSE_ALARMS_FEW,
            NUM_CORRECT_NULLS_FEW, True, {
                model_activation.HIT_INDICES_KEY: HIT_INDICES_FEW_UNIQUE,
                model_activation.MISS_INDICES_KEY: MISS_INDICES_FEW_UNIQUE,
                model_activation.FALSE_ALARM_INDICES_KEY:
                    FALSE_ALARM_INDICES_FEW_UNIQUE,
                model_activation.CORRECT_NULL_INDICES_KEY:
                    CORRECT_NULL_INDICES_FEW_UNIQUE
            })

    def test_get_ct_extremes_many(self):
        """Many contingency-table examples, duplicates allowed."""
        self._check_contingency_table(
            NUM_HITS_MANY, NUM_MISSES_MANY, NUM_FALSE_ALARMS_MANY,
            NUM_CORRECT_NULLS_MANY, False, {
                model_activation.HIT_INDICES_KEY: HIT_INDICES_MANY,
                model_activation.MISS_INDICES_KEY: MISS_INDICES_MANY,
                model_activation.FALSE_ALARM_INDICES_KEY:
                    FALSE_ALARM_INDICES_MANY,
                model_activation.CORRECT_NULL_INDICES_KEY:
                    CORRECT_NULL_INDICES_MANY
            })

    def test_get_ct_extremes_many_unique(self):
        """Many contingency-table examples, one per storm cell."""
        self._check_contingency_table(
            NUM_HITS_MANY, NUM_MISSES_MANY, NUM_FALSE_ALARMS_MANY,
            NUM_CORRECT_NULLS_MANY, True, {
                model_activation.HIT_INDICES_KEY: HIT_INDICES_MANY_UNIQUE,
                model_activation.MISS_INDICES_KEY: MISS_INDICES_MANY_UNIQUE,
                model_activation.FALSE_ALARM_INDICES_KEY:
                    FALSE_ALARM_INDICES_MANY_UNIQUE,
                model_activation.CORRECT_NULL_INDICES_KEY:
                    CORRECT_NULL_INDICES_MANY_UNIQUE
            })
# Run every unit test in this module when it is executed as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c3da697b2674d700b2f102931014de9d",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 38.75555555555555,
"alnum_prop": 0.6338876146788991,
"repo_name": "thunderhoser/GewitterGefahr",
"id": "8cb3d7c66c599008b56e66ca35e8f7af978b984c",
"size": "10464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gewittergefahr/deep_learning/model_activation_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "31275"
},
{
"name": "Python",
"bytes": "5661041"
}
],
"symlink_target": ""
} |
import os
import io
import sys
import time
import platform
from contextlib import contextmanager
"""This is (mostly) a standalone module used to write logging
information about Meson runs. Some output goes to screen,
some to logging dir and some goes to both."""
def _windows_ansi():
    """Return truthy if the Windows console can render ANSI escape codes.

    Tries to enable virtual-terminal processing on the stdout console
    handle; if that fails, falls back to checking for the ANSICON
    environment variable (set by third-party ANSI shims).
    """
    # Imported lazily: these names only exist on Windows.
    from ctypes import windll, byref
    from ctypes.wintypes import DWORD
    kernel = windll.kernel32
    # -11 is STD_OUTPUT_HANDLE in the Win32 API.
    stdout = kernel.GetStdHandle(-11)
    mode = DWORD()
    if not kernel.GetConsoleMode(stdout, byref(mode)):
        return False
    # ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
    # If the call to enable VT processing fails (returns 0), we fallback to
    # original behavior
    return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')
# Decide once, at import time, whether console output should use ANSI color
# codes: only when stdout is an interactive terminal and the platform's
# console supports ANSI sequences (dumb terminals are excluded on POSIX).
if platform.system().lower() == 'windows':
    colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()
else:
    colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
log_dir = None  # Directory the log file lives in; set by initialize().
log_file = None  # Open file object for the persistent log, or None.
log_fname = 'meson-log.txt'  # File name created inside log_dir.
log_depth = 0  # Nesting level; force_print() prefixes '|' per level.
log_timestamp_start = None  # time.monotonic() origin for optional timestamps.
def initialize(logdir):
    """Open the persistent log file inside *logdir* and remember the dir."""
    global log_dir, log_file
    log_dir = logdir
    log_path = os.path.join(logdir, log_fname)
    log_file = open(log_path, 'w', encoding='utf8')
def set_timestamp_start(start):
    # Record the origin (a time.monotonic() value) that process_markup()
    # subtracts from the current time to prefix messages with elapsed time.
    global log_timestamp_start
    log_timestamp_start = start
def shutdown():
    """Close the log file if one is open and return its path, else None."""
    global log_file
    if log_file is None:
        return None
    path = log_file.name
    # Clear the global before closing so a close() that raises cannot leave
    # a dangling reference in log_file.
    closing_file, log_file = log_file, None
    closing_file.close()
    return path
class AnsiDecorator:
    """A piece of text bundled with the ANSI escape code that styles it."""

    plain_code = "\033[0m"

    def __init__(self, text, code, quoted=False):
        self.text = text
        self.code = code
        self.quoted = quoted

    def get_text(self, with_codes):
        """Render the text, optionally wrapped in ANSI codes and quotes."""
        if with_codes:
            rendered = self.code + self.text + AnsiDecorator.plain_code
        else:
            rendered = self.text
        if self.quoted:
            rendered = '"{}"'.format(rendered)
        return rendered
def bold(text, quoted=False):
    # Bold/bright style; optionally rendered inside double quotes.
    return AnsiDecorator(text, "\033[1m", quoted=quoted)

def red(text):
    # Bold red; used by _log_error() for ERROR/DEPRECATION tags.
    return AnsiDecorator(text, "\033[1;31m")

def green(text):
    return AnsiDecorator(text, "\033[1;32m")

def yellow(text):
    # Bold yellow; used by _log_error() for WARNING tags.
    return AnsiDecorator(text, "\033[1;33m")

def cyan(text):
    return AnsiDecorator(text, "\033[1;36m")
def process_markup(args, keep):
    """Render a mixed list of strings/AnsiDecorators into plain strings.

    keep: when True, ANSI escape codes are kept in AnsiDecorator entries;
    when False they are stripped.  An elapsed-time entry is prepended when
    set_timestamp_start() has been called.
    """
    pieces = []
    if log_timestamp_start is not None:
        elapsed = time.monotonic() - log_timestamp_start
        pieces.append('[{:.3f}]'.format(elapsed))
    for arg in args:
        if isinstance(arg, AnsiDecorator):
            pieces.append(arg.get_text(keep))
        elif isinstance(arg, str):
            pieces.append(arg)
        else:
            pieces.append(str(arg))
    return pieces
def force_print(*args, **kwargs):
    """Print to the real stdout, applying the '|' nesting prefix and
    surviving terminals that cannot encode the text.

    The message is rendered into a string first so the prefix can be
    applied to every line of a multi-line message.
    """
    buf = io.StringIO()
    kwargs['file'] = buf
    print(*args, **kwargs)

    rendered = buf.getvalue()
    if log_depth > 0:
        bars = '|' * log_depth
        # Prefix every line except the empty tail after the final newline.
        rendered = bars + rendered.replace(
            '\n', '\n' + bars, rendered.count('\n') - 1)

    # _Something_ is going to get printed.
    try:
        print(rendered, end='')
    except UnicodeEncodeError:
        degraded = rendered.encode('ascii', 'replace').decode('ascii')
        print(degraded, end='')
def debug(*args, **kwargs):
    """Write a message to the log file only, never to the screen."""
    rendered = process_markup(args, False)
    if log_file is not None:
        # The log file never gets ANSI codes.
        print(*rendered, file=log_file, **kwargs)
        log_file.flush()
def log(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
if colorize_console:
arr = process_markup(args, True)
force_print(*arr, **kwargs)
def _log_error(severity, *args, **kwargs):
    """Log a message prefixed with a colored severity tag.

    Accepts an optional 'location' keyword (a node with .subdir and
    .lineno) that is rendered as a file:line prefix.
    """
    from .mesonlib import get_error_location_string
    from .environment import build_filename

    if severity == 'warning':
        tag = yellow('WARNING:')
    elif severity == 'error':
        tag = red('ERROR:')
    elif severity == 'deprecation':
        tag = red('DEPRECATION:')
    else:
        assert False, 'Invalid severity ' + severity
    args = (tag,) + args

    # 'location' is ours, not print()'s -- pop it before forwarding kwargs.
    location = kwargs.pop('location', None)
    if location is not None:
        location_file = os.path.join(location.subdir, build_filename)
        location_str = get_error_location_string(location_file, location.lineno)
        args = (location_str,) + args

    log(*args, **kwargs)
def error(*args, **kwargs):
    """Log a message prefixed with a red 'ERROR:' tag."""
    return _log_error('error', *args, **kwargs)

def warning(*args, **kwargs):
    """Log a message prefixed with a yellow 'WARNING:' tag."""
    return _log_error('warning', *args, **kwargs)

def deprecation(*args, **kwargs):
    """Log a message prefixed with a red 'DEPRECATION:' tag."""
    return _log_error('deprecation', *args, **kwargs)
def exception(e):
    """Log an exception, including file:line:column info when available."""
    log()  # Blank separator line before the error.
    has_location = (hasattr(e, 'file') and hasattr(e, 'lineno')
                    and hasattr(e, 'colno'))
    if has_location:
        log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)
    else:
        log(red('ERROR:'), e)
# Format a list for logging purposes as a string. It separates
# all but the last item with commas, and the last with 'and'.
def format_list(list):
    """Return *list* formatted for humans, e.g. "a, b and c".

    Returns '' for an empty list and the bare item for a 1-item list.
    """
    # NOTE: the parameter name shadows the builtin 'list'; it is kept for
    # backward compatibility with keyword callers, but aliased to a local
    # so the body does not read like it is using the builtin.
    items = list
    if len(items) > 2:
        return ' and '.join([', '.join(items[:-1]), items[-1]])
    if len(items) == 2:
        return ' and '.join(items)
    if len(items) == 1:
        return items[0]
    return ''
@contextmanager
def nested():
    """Context manager that deepens the '|' nesting prefix used by
    force_print() for the duration of the with-block."""
    global log_depth
    log_depth += 1
    try:
        yield
    finally:
        # Always restore the depth, even if the body raised.
        log_depth -= 1
| {
"content_hash": "1061add939f7f4323393b7f5be4b0250",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 90,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.6171802054154996,
"repo_name": "jeandet/meson",
"id": "b8d3cccfb8b9b3eda2a7dfabb018eaef138e1cec",
"size": "5948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/mlog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "868"
},
{
"name": "C",
"bytes": "143772"
},
{
"name": "C#",
"bytes": "949"
},
{
"name": "C++",
"bytes": "27136"
},
{
"name": "CMake",
"bytes": "1780"
},
{
"name": "D",
"bytes": "4573"
},
{
"name": "Dockerfile",
"bytes": "754"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "2125"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "321893"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "Python",
"bytes": "1873182"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9480"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""Parses the command line, discovers the appropriate benchmarks, and runs them.
Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""
import argparse
import json
import logging
import os
import sys
from telemetry import benchmark
from telemetry.core import discover
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry.util import matching
from telemetry.util import bot_utils
# Right now, we only have one of each of our power perf bots. This means that
# all eligible Telemetry benchmarks are run unsharded, which results in very
# long (12h) cycle times. We'd like to reduce the number of tests that we run
# on each bot drastically until we get more of the same hardware to shard tests
# with, but we can't do so until we've verified that the hardware configuration
# is a viable one for Chrome Telemetry tests. This is done by seeing at least
# one all-green test run. As this happens for each bot, we'll add it to this
# whitelist, making it eligible to run only BattOr power tests.
GOOD_POWER_PERF_BOT_WHITELIST = [
    "Mac Power Dual-GPU Perf",
    "Mac Power Low-End Perf"
]


# Log line layout: "(LEVEL) timestamp module.function:line message".
DEFAULT_LOG_FORMAT = (
    '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
    '%(message)s')
def _IsBenchmarkEnabled(benchmark_class, possible_browser):
  # A class counts as enabled only if it really is a Benchmark subclass and
  # is not disabled (via decorators) for the given browser.
  return (issubclass(benchmark_class, benchmark.Benchmark) and
          decorators.IsBenchmarkEnabled(benchmark_class, possible_browser))
def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """ Print benchmarks that are not filtered in the same order of benchmarks in
  the |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the same order of the
      list).
    possible_browser: the possible_browser instance that's used for checking
      which benchmarks are enabled.
    output_pipe: the stream in which benchmarks are printed on.
  """
  if not benchmarks:
    print >> output_pipe, 'No benchmarks found!'
    return

  # Defensive check: every entry must be a Benchmark subclass.
  bad_benchmark = next(
      (b for b in benchmarks if not issubclass(b, benchmark.Benchmark)), None)
  assert bad_benchmark is None, (
      '|benchmarks| param contains non benchmark class: %s' % bad_benchmark)

  # Align the benchmark names to the longest one.
  format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)

  # Benchmarks disabled for this browser are collected here and listed
  # separately below the enabled ones.
  disabled_benchmarks = []

  print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')

  # Sort the benchmarks by benchmark name.
  benchmarks = sorted(benchmarks, key=lambda b: b.Name())
  for b in benchmarks:
    # With no browser to check against, treat every benchmark as enabled.
    if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
      print >> output_pipe, format_string % (b.Name(), b.Description())
    else:
      disabled_benchmarks.append(b)

  if disabled_benchmarks:
    print >> output_pipe, (
        '\nDisabled benchmarks for %s are (force run with -d):' %
        possible_browser.browser_type)
    for b in disabled_benchmarks:
      print >> output_pipe, format_string % (b.Name(), b.Description())
  print >> output_pipe, (
      'Pass --browser to list benchmarks for another browser.\n')
class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  usage = '[command]'

  def __init__(self, commands):
    # Full command list; needed to print the overview and resolve names.
    self._all_commands = commands

  def Run(self, args):
    # With exactly one matching command name, show that command's own help.
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0], self._all_commands)
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    # Otherwise print the generic usage plus the list of all commands.
    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
    print >> sys.stderr, 'Available commands are:'
    for command in self._all_commands:
      print >> sys.stderr, ' %-10s %s' % (
          command.Name(), command.Description())
    print >> sys.stderr, ('"%s help <command>" to see usage information '
                          'for a specific command.' % _ScriptName())
    return 0
class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    # BrowserFinderOptions-backed parser so browser-selection flags work.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    # -j writes the buildbot JSON description instead of human output.
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    # Zero positional args: list everything; one arg: fuzzy-match it.
    if not args.positional_args:
      args.benchmarks = _Benchmarks(environment)
    elif len(args.positional_args) == 1:
      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
                                            environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
    # Set at least log info level for List command.
    # TODO(nedn): remove this once crbug.com/656224 is resolved. The recipe
    # should be change to use verbose logging instead.
    logging.getLogger().setLevel(logging.INFO)
    possible_browser = browser_finder.FindBrowser(args)
    # For Chrome build targets, also look up a matching reference browser
    # so reference runs can be listed in the JSON output.
    if args.browser_type in (
        'release', 'release_x64', 'debug', 'debug_x64', 'canary',
        'android-chromium', 'android-chrome'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0
class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After move to argparse, add command-line args for all
      # benchmarks to subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    all_benchmarks = _Benchmarks(environment)
    # No benchmark name given: print what is available and exit.
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    # Unknown name: suggest the closest matches, then exit.
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      print >> sys.stderr
      most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
          all_benchmarks, input_benchmark_name, lambda x: x.Name())
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Do you mean any of those benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    # Ambiguous name: list the candidates, then exit.
    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    # Stash the resolved class for Run() below.
    cls._benchmark = benchmark_class

  def Run(self, args):
    # Clamp to 255: process exit codes are a single byte on POSIX.
    return min(255, self._benchmark().Run(args))
def _ScriptName():
  # Name of the wrapper script, used in usage/help messages.
  return os.path.basename(sys.argv[0])


def _MatchingCommands(string, commands):
  # All commands whose name starts with the given prefix.
  return [command for command in commands
          if command.Name().startswith(string)]
@decorators.Cache
def _Benchmarks(environment):
  # Discover every Benchmark subclass under the environment's benchmark
  # directories.  Cached (decorators.Cache) since discovery is expensive.
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks
def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
  """Return the benchmark classes whose names match the given string.

  With exact_matches=True, at most one class (whose name equals the input,
  after alias resolution) is returned; otherwise a prefix-based fuzzy match
  is applied to the full name and each dot-separated component.
  """
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    return any(part.startswith(input_string)
               for part in search_string.split('.'))

  # Exact matching.
  if exact_matches:
    # Don't add aliases to search dict, only allow exact matching for them.
    if input_benchmark_name in environment.benchmark_aliases:
      exact_match = environment.benchmark_aliases[input_benchmark_name]
    else:
      exact_match = input_benchmark_name

    for candidate_class in _Benchmarks(environment):
      if exact_match == candidate_class.Name():
        return [candidate_class]
    return []

  # Fuzzy matching.
  return [candidate_class for candidate_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, candidate_class.Name())]
def GetBenchmarkByName(name, environment):
  """Return the benchmark class exactly matching *name*, or None."""
  matched = _MatchBenchmarkName(name, environment, exact_matches=True)
  # With exact_matches, len(matched) is either 0 or 1.
  return matched[0] if matched else None
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format:
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  # TODO(charliea): Remove this once we have more power perf bots.
  only_run_battor_benchmarks = False
  print 'Environment variables: ', os.environ
  if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
    only_run_battor_benchmarks = True

  output = {
    'version': 1,
    'steps': {
    }
  }
  for benchmark_class in benchmark_classes:
    # Skip benchmarks disabled for this browser.
    if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
      continue
    base_name = benchmark_class.Name()
    # TODO(charliea): Remove this once we have more power perf bots.
    # Only run battor power benchmarks to reduce the cycle time of this bot.
    # TODO(rnephew): Enable media.* and power.* tests when Mac BattOr issue
    # is solved.
    if only_run_battor_benchmarks and not base_name.startswith('battor'):
      continue
    # Re-invoke this very script for each step, uploading chartjson results.
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name
    # Shard assignment is stable per benchmark name.
    device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    # Add a parallel ".reference" step when a reference browser is available
    # and the benchmark is enabled on it.
    if (possible_reference_browser and
        _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
      output['steps'][base_name + '.reference'] = {
        'cmd': ' '.join(base_cmd + [
              '--browser=reference', '--output-trace-tag=_ref']),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
      }

  return json.dumps(output, indent=2, sort_keys=True)
def main(environment, extra_commands=None, **log_config_kwargs):
  # Entry point: parse the command line, resolve the sub-command (help /
  # list / run, plus any extras), then dispatch to it.
  # The log level is set in browser_options.
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  ps_util.EnableListingStrayProcessesUponExitHook()

  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  if extra_commands is None:
    extra_commands = []
  all_commands = [Help, List, Run] + extra_commands

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name, all_commands)
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, ' %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
    # No prefix matched: treat the argument as a benchmark name for 'run'.
    command = Run

  binary_manager.InitDependencyManager(environment.client_configs)

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)

  # Set the default chrome root variable.
  parser.set_defaults(chrome_root=environment.default_chrome_root)

  # Support both argparse- and optparse-style command classes.
  if isinstance(parser, argparse.ArgumentParser):
    commandline_args = sys.argv[1:]
    options, args = parser.parse_known_args(commandline_args[1:])
    command.ProcessCommandLineArgs(parser, options, args, environment)
  else:
    options, args = parser.parse_args()
    if commands:
      args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

  # Help needs the command list to print the overview.
  if command == Help:
    command_instance = command(all_commands)
  else:
    command_instance = command()
  if isinstance(command_instance, command_line.OptparseCommand):
    return command_instance.Run(options)
  else:
    return command_instance.Run(options, args)
| {
"content_hash": "4d8b034cd13f2358b478fd99e034f1c3",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 80,
"avg_line_length": 35.437209302325584,
"alnum_prop": 0.674891718073238,
"repo_name": "sahiljain/catapult",
"id": "ae5020e0c1d5cff41da60439afa4d75342349c8d",
"size": "15401",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/benchmark_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6390"
},
{
"name": "CSS",
"bytes": "24751"
},
{
"name": "HTML",
"bytes": "14570791"
},
{
"name": "JavaScript",
"bytes": "511007"
},
{
"name": "Python",
"bytes": "5842419"
},
{
"name": "Shell",
"bytes": "2834"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import numpy
from six.moves import range
def snap_to_cube(q_start, q_stop, chunk_depth=16, q_index=1):
    """
    For any q in {x, y, z, t}

    Takes in a q-range and returns a 1D bound that starts at a cube
    boundary and ends at another cube boundary and includes the volume
    inside the bounds. For instance, snap_to_cube(2, 3) = [1, 18]
    (with the default 16-deep chunks and 1-based indexing).

    Arguments:
        q_start (int): The lower bound of the q bounding box of volume
        q_stop (int): The upper bound of the q bounding box of volume
        chunk_depth (int : CHUNK_DEPTH) The size of the chunk in this
            volume (use ``get_info()``)
        q_index (int : 1): The starting index of the volume (in q)

    Returns:
        list: [lo, hi] bounding box for the volume.  Note that hi is one
        past the snapped upper chunk boundary (the code adds 1 so the
        volume inside the bound is included).
    """
    # Start by indexing everything at zero for our own sanity
    q_start -= q_index
    q_stop -= q_index

    # Round the lower bound DOWN to the nearest chunk boundary.
    if q_start % chunk_depth == 0:
        lo = q_start
    else:
        lo = q_start - (q_start % chunk_depth)

    # Round the upper bound UP to the nearest chunk boundary.
    if q_stop % chunk_depth == 0:
        hi = q_stop
    else:
        hi = q_stop + (chunk_depth - q_stop % chunk_depth)

    return [lo + q_index, hi + q_index + 1]
def block_compute(x_start, x_stop,
                  y_start, y_stop,
                  z_start, z_stop,
                  origin=(0, 0, 1),
                  block_size=(256, 256, 16)):
    """
    Get bounding box coordinates (in 3D) of small cutouts to request in
    order to reconstitute a larger cutout.

    Arguments:
        x_start (int): The lower bound of dimension x
        x_stop (int): The upper bound of dimension x
        y_start (int): The lower bound of dimension y
        y_stop (int): The upper bound of dimension y
        z_start (int): The lower bound of dimension z
        z_stop (int): The upper bound of dimension z
        origin (tuple : (0, 0, 1)): The (x, y, z) origin of the block grid
        block_size (tuple : (256, 256, 16)): Block extent per dimension

    Returns:
        [((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... ]
    """
    def _axis_slices(a_start, a_stop, a_origin, a_block):
        # Block boundaries strictly inside (a_start, a_stop) for this axis.
        bounds = [b for b in range(a_origin, a_stop + a_block, a_block)
                  if a_start < b < a_stop]
        if not bounds:
            # The whole requested range fits inside one block.
            return [(a_start, a_stop)]
        slices = []
        # Full blocks between interior boundaries first, then the partial
        # leading and trailing slices (this ordering matches the original).
        for lo in bounds[:-1]:
            slices.append((lo, lo + a_block))
        slices.append((a_start, bounds[0]))
        slices.append((bounds[-1], a_stop))
        return slices

    x_slices = _axis_slices(x_start, x_stop, origin[0], block_size[0])
    # BUG FIX: the y boundaries were previously computed against x_stop,
    # which silently dropped y blocks whenever y_stop exceeded x_stop.
    y_slices = _axis_slices(y_start, y_stop, origin[1], block_size[1])
    z_slices = _axis_slices(z_start, z_stop, origin[2], block_size[2])

    # Cartesian product of the per-axis slices:
    # ((x_start, x_stop), (y_start, y_stop), (z_start, z_stop))
    chunks = []
    for x in x_slices:
        for y in y_slices:
            for z in z_slices:
                chunks.append((x, y, z))
    return chunks
| {
"content_hash": "c6899c71a8171e0092c1a145b20c0ad3",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 34.207207207207205,
"alnum_prop": 0.5609691861996313,
"repo_name": "neurodata/ndio",
"id": "292735f5c9d0bd057a4ba456e37ef2e76c329c2e",
"size": "3797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndio/utils/parallel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "399"
},
{
"name": "Python",
"bytes": "201982"
},
{
"name": "Shell",
"bytes": "314"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os, sys, re
def getTerminalSize():
    """
    Best-effort detection of the terminal size.

    returns (lines:int, cols:int)

    Tries, in order: the TIOCGWINSZ ioctl on stdout / the controlling
    terminal, the ``stty size`` command, and the LINES/COLUMNS environment
    variables; falls back to the classic (25, 80) default.
    """
    import os, struct
    def ioctl_GWINSZ(fd):
        import fcntl, termios
        # TIOCGWINSZ fills a winsize struct; the first two shorts are
        # (rows, cols).
        return struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
    # Not a terminal at all (e.g. output piped): use the default.
    if (not sys.stdout.isatty()):
        return (25, 80)
    # try stdin, stdout, stderr
    # (stdout first -- fd 1)
    fd = 1
    try:
        return ioctl_GWINSZ(fd)
    # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still propagate.
    except Exception:
        pass
    # try os.ctermid()
    try:
        fd = os.open(os.ctermid(), os.O_RDONLY)
        try:
            return ioctl_GWINSZ(fd)
        finally:
            os.close(fd)
    except Exception:
        pass
    # try `stty size`
    try:
        size = tuple(int(x) for x in os.popen("stty size", "r").read().split())
        # A failed stty prints nothing, which used to make this function
        # return an empty tuple; only accept a proper (rows, cols) pair.
        if len(size) == 2:
            return size
    except Exception:
        pass
    # try environment variables
    try:
        return tuple(int(os.getenv(var)) for var in ("LINES", "COLUMNS"))
    except Exception:
        pass
    # i give up. return default.
    return (25, 80)
def main():
    """Report the detected terminal size on stderr."""
    rows, columns = getTerminalSize()
    sys.stderr.write(
        "rows: " + str(rows) + " columns: " + str(columns) + "\n")


if __name__ == '__main__':
    main()
| {
"content_hash": "d6d7aa72b657c48b1c06fea2092492a6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 20.58823529411765,
"alnum_prop": 0.6,
"repo_name": "rtmclay/Themis",
"id": "28ee37b85d8667d3ce9c8b14ba10c01dff5f69bf",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/getTerminalSize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3170"
},
{
"name": "Makefile",
"bytes": "5769"
},
{
"name": "Python",
"bytes": "75215"
},
{
"name": "Shell",
"bytes": "1067"
}
],
"symlink_target": ""
} |
# Django sample settings for the flahoo project.  Copy and fill in the
# database / path / API-key placeholders before use.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

# Flickr API credentials and tag count used by the flahoo app.
FLAHOO_FLICKR_API_KEY = "Insert your Flickr API key here"
FLAHOO_TOTAL_TAGS = 3

MANAGERS = ADMINS

DATABASE_ENGINE = ''    # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''      # Or path to database file if using sqlite3.
DATABASE_USER = ''      # Not used with sqlite3.
DATABASE_PASSWORD = ''  # Not used with sqlite3.
DATABASE_HOST = ''      # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''      # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ne2vc53*82ai8*ika=u^$h+4b9y(w95k2l8zkd6%mj9f3dfojx'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)

ROOT_URLCONF = 'flahoo.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # BUG FIX: the trailing comma below is required -- without it this
    # parenthesized expression is a plain string rather than a 1-tuple,
    # and Django would iterate it character by character.
    '/path/to/flahoo/templates/',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
)
| {
"content_hash": "95e5940c53655c129e1aa0c1e5254cc7",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 101,
"avg_line_length": 35.24390243902439,
"alnum_prop": 0.7093425605536332,
"repo_name": "remiprev/flahoo",
"id": "de018caae6254daaafd455a0a003bf55c860f140",
"size": "2929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flahoo/sample_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23758"
}
],
"symlink_target": ""
} |
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class AssociateAlarmPolicyAction(BaseAction):
    """CLI action that associates one or more resources with an alarm policy."""

    # Name of the QingCloud API action this command maps to.
    action = 'AssociateAlarmPolicy'
    # Sub-command name exposed on the command line.
    command = 'associate-alarm-policy'
    usage = '%(prog)s [-a <alarm_policy>...] [options] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        # Register this action's extra command-line options on the parser.
        parser.add_argument("-a", "--alarm-policy", dest="alarm_policy",
                            action="store", type=str, default='',
                            help="the ID of alarm policy.")
        parser.add_argument("-r", "--resources", dest="resources",
                            action="store", type=str, default='',
                            help="the ID of resources you want to associate with alarm policy.")
        parser.add_argument("-R", "--related-resource", dest="related_resource",
                            action="store", type=str, default=None,
                            help="when the network load balancer is bound,related_resource needs to specify a public network IP ID associated with this load balancer.")

    @classmethod
    def build_directive(cls, options):
        # Validate required options and assemble the API request payload.
        # Returns None (after printing an error) when validation fails.
        if options.alarm_policy == '':
            print('error: alarm_policy should be specified.')
            return None

        # "resources" arrives as a delimited string; explode_array splits it.
        resources = explode_array(options.resources)
        if not resources:
            print('error: resources should be specified')
            return None

        directive = {
            "alarm_policy": options.alarm_policy,
            "resources": resources,
            "related_resource": options.related_resource
        }
        return directive
| {
"content_hash": "68b436c40f85a63a229c5b763cb46cf0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 168,
"avg_line_length": 38.69767441860465,
"alnum_prop": 0.5895432692307693,
"repo_name": "yunify/qingcloud-cli",
"id": "1683893c8f0257ee3a44faba8d1880abad385a24",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/alarm_policy/associate_alarm_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
def create_with_snapshotted_data_disk(
    project_id: str, zone: str, instance_name: str, snapshot_link: str
):
    """
    Create a new VM instance running Debian 10, with a second data disk
    restored from a disk snapshot.

    Args:
        project_id: project ID or project number of the Cloud project you want to use.
        zone: name of the zone to create the instance in. For example: "us-west3-b"
        instance_name: name of the new virtual machine (VM) instance.
        snapshot_link: link to the snapshot you want to use as the source of your
            data disk in the form of: "projects/{project_name}/global/snapshots/{snapshot_name}"

    Returns:
        Instance object.
    """
    debian_image = get_image_from_family(
        project="debian-cloud", family="debian-10"
    )
    disk_type = f"zones/{zone}/diskTypes/pd-standard"

    # 10 GB boot disk from the image, 11 GB data disk from the snapshot.
    boot_disk = disk_from_image(disk_type, 10, True, debian_image.self_link)
    data_disk = disk_from_snapshot(disk_type, 11, False, snapshot_link)

    return create_instance(project_id, zone, instance_name, [boot_disk, data_disk])
# </INGREDIENT>
| {
"content_hash": "0f05c9b3f6922f4967bc08ea1ac69775",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 97,
"avg_line_length": 41.25925925925926,
"alnum_prop": 0.6660682226211849,
"repo_name": "googleapis/python-compute",
"id": "ea87201e5f1729a412c86b19cdf6e194e80acdfe",
"size": "2012",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "samples/ingredients/instances/create_start_instance/create_with_snapshotted_data_disk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
import logging
from random import choice
import fbvoting.apis.fb as fbapi
from fbvoting.db.users import users_with_missing_votes
import fbvoting.db.notifications as db
logger = logging.getLogger(__name__)
def send_notifications():
    """Notify each user with missing votes, at most once per expiry window."""
    logger.info("Notifications about missing votes: querying db...")
    notification_type = db.NotificationType.missing_votes
    logger.info("Notifications about missing votes: db has been queried.")
    app_api = fbapi.get_application_token_api()

    n_sent = 0
    n_skipped = 0
    for user, missing_votes in users_with_missing_votes():
        logger.debug('user %s has missing votes %s', user, missing_votes)

        # Rate-limit: skip users whose last notification has not expired yet.
        if not db.is_last_notification_expired(user, notification_type):
            n_skipped += 1
            continue

        logger.debug('user %s has notification expired: we can send one', user)
        if not fbapi.has_app_installed(user):
            logger.warn("User %s should have been notified for its missing votes, but its app cannot be accessed.", user)
            continue

        # Pick one of the categories the user still has to vote in.
        category = choice(list(missing_votes))
        logger.debug("user %s has also app installed: notifying for category %s", user, category)
        msg = "We are looking for %s music gurus: do you have an expert among your friends?" % category
        href = "votes/" + category
        app_api[user].notifications.post(template=msg, href=href)
        db.mark_notification(user, notification_type)
        n_sent += 1

    logger.info("Notifications about missing votes: %i sent, %i skipped.", n_sent, n_skipped)
| {
"content_hash": "21bb6277296c09c6a2a14e4894a230c0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 125,
"avg_line_length": 43.18421052631579,
"alnum_prop": 0.6386349786715417,
"repo_name": "corradomonti/fbvoting",
"id": "d2c580d5bb2b00dd5fe21c5b3e16a38d2c231cd5",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fbvoting/notifications/missingvotes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6175"
},
{
"name": "HTML",
"bytes": "41796"
},
{
"name": "Java",
"bytes": "37313"
},
{
"name": "JavaScript",
"bytes": "57106"
},
{
"name": "Python",
"bytes": "116182"
},
{
"name": "Ruby",
"bytes": "355"
},
{
"name": "Shell",
"bytes": "2134"
}
],
"symlink_target": ""
} |
import mock
from chroma_core.models import Volume, VolumeNode, ManagedHost, LNetConfiguration
from chroma_core.models import NetworkInterface, Nid, ServerProfile
from chroma_core.models import NTPConfiguration
from chroma_core.models import CorosyncConfiguration
from chroma_core.models import PacemakerConfiguration
from chroma_core.models import Command
from chroma_core.lib.cache import ObjectCache
from chroma_core.models import StorageResourceRecord
from chroma_core.services.log import log_register
from tests.unit.chroma_core.helpers import random_str
log = log_register("synthetic_objects")
def synthetic_volume(serial=None, with_storage=True, usable_for_lustre=True):
    """Create a Volume, optionally backed by a synthetic StorageResourceRecord."""
    volume = Volume.objects.create()

    if not serial:
        # Derive a unique serial from the freshly assigned volume id.
        serial = "foobar%d" % volume.id

    if with_storage:
        # Imported lazily to avoid pulling in the plugin machinery when unused.
        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

        resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class("linux", "ScsiDevice")
        storage_resource, _ = StorageResourceRecord.get_or_create_root(
            resource_class, resource_class_id, {"serial": serial, "size": 8192000}
        )
        volume.storage_resource = storage_resource

    volume.usable_for_lustre = usable_for_lustre
    volume.save()

    return volume
def synthetic_volume_full(primary_host, secondary_hosts=None, usable_for_lustre=True):
    """Create a Volume plus one primary VolumeNode and N secondary VolumeNodes."""
    volume = synthetic_volume(usable_for_lustre=usable_for_lustre)
    path = "/fake/path/%s" % volume.id

    node_specs = [(primary_host, True)]
    node_specs.extend((host, False) for host in (secondary_hosts or []))
    for host, is_primary in node_specs:
        VolumeNode.objects.create(volume=volume, host=host, path=path, primary=is_primary)

    return volume
def synthetic_host(
    address=None, nids=list([]), storage_resource=False, fqdn=None, nodename=None, server_profile="test_profile"
):
    """
    Create a ManagedHost + paraphernalia, with states set as if configuration happened successfully
    :param storage_resource: If true, create a PluginAgentResources (additional overhead, only sometimes required)
    """
    profile = ServerProfile.objects.get(name=server_profile)

    if address is None:
        address = random_str(postfix=".tld")
    # fqdn and nodename default to the address when not given explicitly.
    fqdn = address if fqdn is None else fqdn
    nodename = address if nodename is None else nodename

    host = ManagedHost.objects.create(
        address=address,
        fqdn=fqdn,
        nodename=nodename,
        state="managed",
        server_profile=profile,
        immutable_state=not profile.managed if profile else False,
    )
    ObjectCache.add(ManagedHost, host)

    lnet_configuration = synthetic_lnet_configuration(host, nids)

    if profile.managed:
        # Managed hosts also get NTP, corosync and pacemaker configurations.
        for make_configuration in (synthetic_ntp_configuration,
                                   synthetic_corosync_configuration,
                                   synthetic_pacemaker_configuration):
            make_configuration(host)

    log.debug("synthetic_host: %s %s" % (address, lnet_configuration.get_nids()))

    if storage_resource:
        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

        resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class(
            "linux", "PluginAgentResources"
        )
        StorageResourceRecord.get_or_create_root(
            resource_class, resource_class_id, {"plugin_name": "linux", "host_id": host.id}
        )

    return host
def synthetic_lnet_configuration(host, nids):
    """Create/refresh the LNetConfiguration (plus NetworkInterface and Nid rows) for *host*."""
    lnet_configuration, _ = LNetConfiguration.objects.get_or_create(host=host)
    ObjectCache.add(LNetConfiguration, lnet_configuration)

    # Drop any existing nids; they are recreated below when some are requested.
    Nid.objects.filter(lnet_configuration=lnet_configuration).delete()

    if not nids:
        lnet_configuration.state = "lnet_unloaded"
    else:
        assert type(nids[0]) == Nid.Nid

        lnet_configuration.state = "lnet_up"
        for interface_no, nid in enumerate(nids):
            interface_name = "eth%s" % interface_no
            try:
                # Reuse and update an existing interface when one matches.
                network_interface = NetworkInterface.objects.get(
                    host=host, name=interface_name, type=nid.lnd_type
                )
                network_interface.inet4_address = nid.nid_address
                network_interface.inet4_prefix = 24
                network_interface.state_up = True
            except NetworkInterface.DoesNotExist:
                network_interface = NetworkInterface.objects.create(
                    host=host,
                    name=interface_name,
                    type=nid.lnd_type,
                    inet4_address=nid.nid_address,
                    inet4_prefix=24,
                    state_up=True,
                )
            network_interface.save()

            nid_record = Nid.objects.create(
                lnet_configuration=lnet_configuration,
                network_interface=network_interface,
                lnd_network=nid.lnd_network,
                lnd_type=nid.lnd_type,
            )
            nid_record.save()

    lnet_configuration.save()

    return lnet_configuration
def create_synthetic_device_info(host, mock_server, plugin):
    """Creates the data returned from plugins for integration test purposes. Only does lnet data because
    at present that is all we need."""
    # Explicitly mocked data takes priority; otherwise default to an empty dict.
    try:
        result = mock_server["device-plugin"][plugin]
    except KeyError:
        result = {}

    # Synthesize linux_network data when nothing was mocked.  Network devices
    # come and go along with the nids, mirroring the simulator's behaviour;
    # this keeps current testing consistent.
    if plugin == "linux_network" and result == {}:
        lnet_state = host.state if host.state.startswith("lnet") else "lnet_unloaded"

        interfaces = {}
        nids = {}
        for interface_no, nid in enumerate(mock_server["nids"] or []):
            name = "eth%s" % interface_no
            interfaces[name] = {
                "mac_address": "12:34:56:78:90:%s" % interface_no,
                "inet4_address": nid.nid_address,
                "inet6_address": "Need An inet6 Simulated Address",
                "type": nid.lnd_type,
                "rx_bytes": "24400222349",
                "tx_bytes": "1789870413",
                "up": True,
            }
            nids[name] = {
                "nid_address": nid.nid_address,
                "lnd_type": nid.lnd_type,
                "lnd_network": nid.lnd_network,
                "status": "?",
                "refs": "?",
                "peer": "?",
                "rtr": "?",
                "max": "?",
                "tx": "?",
                "min": "?",
            }

        result = {"interfaces": interfaces, "lnet": {"state": lnet_state, "nids": nids}}

    return {plugin: result}
def _create_simple_synthetic_object(class_, **kwargs):
    """Instantiate *class_* with **kwargs, persist it, and prime the ObjectCache."""
    obj = class_(**kwargs)
    obj.save()
    ObjectCache.add(class_, obj)
    return obj
def synthetic_ntp_configuration(host):
    """Attach a fresh NTPConfiguration to *host*, which must not already have one."""
    assert host.ntp_configuration == None
    ntp_configuration = _create_simple_synthetic_object(NTPConfiguration, host=host)
    return ntp_configuration
def synthetic_corosync_configuration(host):
    """Attach a fresh CorosyncConfiguration to *host*, which must not already have one."""
    assert host.corosync_configuration == None
    corosync_configuration = _create_simple_synthetic_object(CorosyncConfiguration, host=host)
    return corosync_configuration
def synthetic_pacemaker_configuration(host):
    """Attach a started PacemakerConfiguration to *host*, which must not already have one."""
    assert host.pacemaker_configuration == None
    pacemaker_configuration = _create_simple_synthetic_object(PacemakerConfiguration, host=host, state="started")
    return pacemaker_configuration
def parse_synthentic_device_info(host_id, data):
    """Parses the data returned from plugins for integration test purposes. Only does lnet data
    because at present that is all we need."""
    # Convert the raw nid dicts into Nid.Nid tuples and hand them to
    # synthetic_lnet_configuration, which performs the actual database writes.
    for plugin, device_data in data.items():
        if plugin != "linux_network":
            continue

        raw_nids = device_data["lnet"]["nids"]
        if raw_nids:
            nid_tuples = [
                Nid.Nid(nid["nid_address"], nid["lnd_type"], nid["lnd_network"])
                for nid in raw_nids.values()
            ]
        else:
            nid_tuples = None

        synthetic_lnet_configuration(ManagedHost.objects.get(id=host_id), nid_tuples)
def _synthetic_create_host_ssh(address, server_profile, root_pw=None, pkey=None, pkey_pw=None):
    """Synthetic stand-in for JobSchedulerClient.create_host_ssh.

    Reuses an existing undeployed ManagedHost for *address*, or builds a new
    synthetic one from the mock server table, and returns it with a completed
    no-op Command.
    """
    try:
        host = ManagedHost.objects.get(address=address)
    except ManagedHost.DoesNotExist:
        from tests.unit.chroma_core.helpers import MockAgentRpc

        host_info = MockAgentRpc.mock_servers[address]
        host = synthetic_host(
            address,
            fqdn=host_info["fqdn"],
            nids=host_info["nids"],
            nodename=host_info["nodename"],
            server_profile=server_profile,
        )
    else:
        assert host.state == "undeployed"

    command = Command.objects.create(message="No-op", complete=True)
    return host, command
# Patch object that swaps the real JobSchedulerClient.create_host_ssh for the
# synthetic implementation above; apply as a decorator/context manager in tests.
create_host_ssh_patch = mock.patch(
    "chroma_core.services.job_scheduler.job_scheduler_client.JobSchedulerClient.create_host_ssh",
    new=mock.Mock(side_effect=_synthetic_create_host_ssh),
)
| {
"content_hash": "c752ab8c0db5bc1203a1e84ac1b981a4",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 118,
"avg_line_length": 33.8041237113402,
"alnum_prop": 0.6289519162346244,
"repo_name": "intel-hpdd/intel-manager-for-lustre",
"id": "fa28a03ff86e97719240764e09403fb0eecba8b0",
"size": "9837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/chroma_core/helpers/synthentic_objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "20532"
},
{
"name": "Makefile",
"bytes": "20966"
},
{
"name": "Python",
"bytes": "6527307"
},
{
"name": "Roff",
"bytes": "1415"
},
{
"name": "Ruby",
"bytes": "27697"
},
{
"name": "Shell",
"bytes": "127203"
}
],
"symlink_target": ""
} |
"""
sentry.utils.json
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson
import datetime
import uuid
class BetterJSONEncoder(DjangoJSONEncoder):
    """DjangoJSONEncoder that also handles UUIDs, datetimes and (frozen)sets."""

    def default(self, obj):
        if isinstance(obj, uuid.UUID):
            return obj.hex
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S.%f%Z')
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Everything else is delegated to Django's encoder.
        return super(BetterJSONEncoder, self).default(obj)
def better_decoder(data):
    """Identity object_hook; kept as an extension point for loads()."""
    return data
def dumps(value, **kwargs):
    """Serialize *value* to a JSON string using BetterJSONEncoder."""
    encoded = simplejson.dumps(value, cls=BetterJSONEncoder, **kwargs)
    return encoded
def loads(value, **kwargs):
    """Deserialize a JSON string.

    Extra keyword arguments are forwarded to ``simplejson.loads``; previously
    they were accepted by the signature but silently discarded, unlike the
    symmetric ``dumps`` above which does forward them.
    """
    return simplejson.loads(value, object_hook=better_decoder, **kwargs)
| {
"content_hash": "8bf1738f3d6a896264d4b9b58a332ced",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.6769911504424779,
"repo_name": "Kronuz/django-sentry",
"id": "fa17433802d77edc450a3ca8d0985ea90e068765",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/utils/json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "134924"
},
{
"name": "JavaScript",
"bytes": "77963"
},
{
"name": "Python",
"bytes": "632636"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AbandonConflictTest(BitcoinTestFramework):
    """Exercise the abandontransaction RPC and wallet conflict accounting.

    Builds a chain of unconfirmed wallet transactions, evicts them from the
    mempool via min-relay-fee restarts, abandons them, resurrects them, and
    finally conflicts them with a mined double spend — checking wallet
    balances and transaction metadata at every step.
    """

    def set_test_params(self):
        # Node 0 starts with a very low min relay fee so the cheap test
        # transactions are accepted; later restarts raise it to force the
        # transactions out of the mempool.
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001"], []]

    def skip_test_if_missing_module(self):
        # The whole test drives wallet RPCs.
        self.skip_if_no_wallet()

    def run_test(self):
        """Drive the abandon/unabandon/conflict scenario on node 0."""
        self.generate(self.nodes[1], COINBASE_MATURITY)
        balance = self.nodes[0].getbalance()
        # Three self-sends of 10 BTC each; these fund the rest of the test.
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        self.sync_mempools()
        self.generate(self.nodes[1], 1)

        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))

        newbalance = self.nodes[0].getbalance()
        assert balance - newbalance < Decimal("0.001") #no more than fees lost
        balance = newbalance

        # Disconnect nodes so node0's transactions don't get into node1's mempool
        self.disconnect_nodes(0, 1)

        # Identify the 10btc outputs
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
        nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
        nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))

        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])

        # Identify the 14.99998btc output
        nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))

        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])

        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])

        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance

        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']

        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)

        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        balances = self.nodes[0].getbalances()['mine']
        assert_equal(balances['untrusted_pending'] + balances['trusted'], newbalance)

        # Also shouldn't show up in listunspent
        assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
        balance = newbalance

        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance

        self.log.info("Check abandoned transactions in listsinceblock")
        listsinceblock = self.nodes[0].listsinceblock()
        txAB1_listsinceblock = [d for d in listsinceblock['transactions'] if d['txid'] == txAB1 and d['category'] == 'send']
        for tx in txAB1_listsinceblock:
            assert_equal(tx['abandoned'], True)
            assert_equal(tx['confirmations'], 0)
            assert_equal(tx['trusted'], False)

        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)

        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance

        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance

        # Remove using high relay fee again
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance

        self.log.info("Test transactions conflicted by a double spend")

        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.generate(self.nodes[1], 1, sync_fun=self.no_op)

        self.connect_nodes(0, 1)
        self.sync_blocks()
        tx_list = self.nodes[0].listtransactions()

        conflicted = [tx for tx in tx_list if tx["confirmations"] < 0]
        assert_equal(4, len(conflicted))

        wallet_conflicts = [tx for tx in conflicted if tx["walletconflicts"]]
        assert_equal(2, len(wallet_conflicts))

        double_spends = [tx for tx in tx_list if tx["walletconflicts"] and tx["confirmations"] > 0]
        assert_equal(1, len(double_spends))
        double_spend = double_spends[0]

        # Test the properties of the conflicted transactions, i.e. with confirmations < 0.
        for tx in conflicted:
            assert_equal(tx["abandoned"], False)
            assert_equal(tx["confirmations"], -1)
            assert_equal(tx["trusted"], False)

        # Test the properties of the double-spend transaction, i.e. having wallet conflicts and confirmations > 0.
        assert_equal(double_spend["abandoned"], False)
        assert_equal(double_spend["confirmations"], 1)
        assert "trusted" not in double_spend.keys() # "trusted" only returned if tx has 0 or negative confirmations.

        # Test the walletconflicts field of each.
        for tx in wallet_conflicts:
            assert_equal(double_spend["walletconflicts"], [tx["txid"]])
            assert_equal(tx["walletconflicts"], [double_spend["txid"]])

        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance

        # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
        assert_equal(balance, newbalance)
# Standard functional-test entry point.
if __name__ == '__main__':
    AbandonConflictTest().main()
| {
"content_hash": "4f10ede83eb7952681af8c0935804d90",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 138,
"avg_line_length": 48.7196261682243,
"alnum_prop": 0.6556685210051794,
"repo_name": "mm-s/bitcoin",
"id": "27d9d8da8899e49524ebe4f51c0f07b6c95297bb",
"size": "10640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/wallet_abandonconflict.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1228370"
},
{
"name": "C++",
"bytes": "9407360"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "247147"
},
{
"name": "Makefile",
"bytes": "136414"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2661378"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56897"
},
{
"name": "Scheme",
"bytes": "24076"
},
{
"name": "Shell",
"bytes": "211674"
}
],
"symlink_target": ""
} |
from itertools import takewhile
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
    # Skip the 15-line header, then keep everything up to (but excluding)
    # the line that starts the "Installation" section.
    body_lines = f.readlines()[15:]
    readme = ''.join(takewhile(lambda l: not l.startswith('Installation'), body_lines))

setup(
    name='overloading',
    version='0.5.0',
    description='Function overloading for Python 3',
    long_description='\n' + readme,
    url='https://github.com/bintoro/overloading.py',
    author='Kalle Tuure',
    author_email='kalle@goodtimes.fi',
    license='MIT',
    py_modules=['overloading'],
    install_requires=[],
    keywords='overload function method dispatch',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3 :: Only',
        'License :: OSI Approved :: MIT License'
    ]
)
| {
"content_hash": "d556b48fef20910955264caac89d53b1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 100,
"avg_line_length": 33.46875,
"alnum_prop": 0.6143790849673203,
"repo_name": "bintoro/overloading.py",
"id": "bca344f3f50ecb6283474713eadee8c5ff33f7b1",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36663"
}
],
"symlink_target": ""
} |
from .screens import Screens
class Abilities():
    """Placeholder for ability tracking; parsing is not implemented yet."""

    def __init__(self):
        pass

    def update(self, screenType, screen):
        # Intentionally a no-op until ability-screen parsing is written.
        pass
| {
"content_hash": "37f737a89261c701ae682b3575e2deaf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 16.11111111111111,
"alnum_prop": 0.6068965517241379,
"repo_name": "nkhoit/dcss.py",
"id": "df76837c488ef7d94dc16865cb19c7e79a913dc3",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dcss/abilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37308"
}
],
"symlink_target": ""
} |
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import kinect_joint_t
class kinect_bodyframe_update_t(object):
    """LCM message holding one body-frame update: the full 25-joint skeleton."""
    __slots__ = ["bodyjoints"]

    # Joint index constants for the 25 skeleton joints; use them to index
    # into self.bodyjoints.
    SpineBase = 0
    SpineMid = 1
    Neck = 2
    Head = 3
    ShoulderLeft = 4
    ElbowLeft = 5
    WristLeft = 6
    HandLeft = 7
    ShoulderRight = 8
    ElbowRight = 9
    WristRight = 10
    HandRight = 11
    HipLeft = 12
    KneeLeft = 13
    AnkleLeft = 14
    FootLeft = 15
    HipRight = 16
    KneeRight = 17
    AnkleRight = 18
    FootRight = 19
    SpineShoulder = 20
    HandTipLeft = 21
    ThumbLeft = 22
    HandTipRight = 23
    ThumbRight = 24

    def __init__(self):
        # One slot per joint; entries are kinect_joint_t instances once populated.
        self.bodyjoints = [ None for dim0 in range(25) ]

    def encode(self):
        # Wire format: 8-byte type fingerprint followed by the packed payload.
        buf = StringIO.StringIO()
        buf.write(kinect_bodyframe_update_t._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()

    def _encode_one(self, buf):
        # Encode each joint in index order; every slot must hold a kinect_joint_t.
        for i0 in range(25):
            assert self.bodyjoints[i0]._get_packed_fingerprint() == kinect_joint_t.kinect_joint_t._get_packed_fingerprint()
            self.bodyjoints[i0]._encode_one(buf)

    def decode(data):
        # Accept either a file-like object or a raw byte string.
        if hasattr(data, 'read'):
            buf = data
        else:
            buf = StringIO.StringIO(data)
        # The leading fingerprint must match this generated message revision.
        if buf.read(8) != kinect_bodyframe_update_t._get_packed_fingerprint():
            raise ValueError("Decode error")
        return kinect_bodyframe_update_t._decode_one(buf)
    decode = staticmethod(decode)

    def _decode_one(buf):
        self = kinect_bodyframe_update_t()
        self.bodyjoints = []
        for i0 in range(25):
            self.bodyjoints.append(kinect_joint_t.kinect_joint_t._decode_one(buf))
        return self
    _decode_one = staticmethod(_decode_one)

    _hash = None
    def _get_hash_recursive(parents):
        # Fold the member type's hash into this type's base hash; *parents*
        # guards against infinite recursion on cyclic type graphs.
        if kinect_bodyframe_update_t in parents: return 0
        newparents = parents + [kinect_bodyframe_update_t]
        tmphash = (0x833951cac4a4f9e8+ kinect_joint_t.kinect_joint_t._get_hash_recursive(newparents)) & 0xffffffffffffffff
        tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)

    _packed_fingerprint = None
    def _get_packed_fingerprint():
        # Cache the big-endian packed 64-bit fingerprint on first use.
        if kinect_bodyframe_update_t._packed_fingerprint is None:
            kinect_bodyframe_update_t._packed_fingerprint = struct.pack(">Q", kinect_bodyframe_update_t._get_hash_recursive([]))
        return kinect_bodyframe_update_t._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| {
"content_hash": "40fcac4dad7671abac09b797dd6a83fe",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 128,
"avg_line_length": 31.04597701149425,
"alnum_prop": 0.6360607182524991,
"repo_name": "GearsAD/semisorted_arnerve",
"id": "347cab8d4612c336c4f3d81b3cee17599c9c7cd6",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lcm_types/user_types/semisorted_arnerve/kinect_bodyframe_update_t.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "912872"
},
{
"name": "C#",
"bytes": "66637"
},
{
"name": "Java",
"bytes": "30714"
},
{
"name": "Python",
"bytes": "354774"
}
],
"symlink_target": ""
} |
import inspect
import os
from trove_guestagent.openstack.common import importutils
from trove_guestagent.openstack.common import loopingcall
from trove_guestagent.openstack.common.rpc import service as rpc_service
from trove_guestagent.common import cfg
CONF = cfg.CONF
class RpcService(rpc_service.Service):
    """RPC service that also runs the manager's periodic tasks on a timer."""

    def __init__(self, host=None, binary=None, topic=None, manager=None):
        # Derive defaults from config and from the entry-point script name.
        service_host = host or CONF.host
        service_binary = binary or os.path.basename(inspect.stack()[-1][1])
        service_topic = topic or service_binary.rpartition('trove-')[2]
        self.manager_impl = importutils.import_object(manager)
        self.report_interval = CONF.report_interval
        super(RpcService, self).__init__(service_host, service_topic,
                                         manager=self.manager_impl)

    def start(self):
        """Start the service and block on the periodic-task loop."""
        super(RpcService, self).start()
        # TODO(hub-cap): Currently the context is none... do we _need_ it here?
        heartbeat = loopingcall.LoopingCall(
            self.manager_impl.run_periodic_tasks, context=None)
        heartbeat.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
        heartbeat.wait()
| {
"content_hash": "0aff864403e5d6e53f741c669b9c03ab",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 39.46666666666667,
"alnum_prop": 0.6494932432432432,
"repo_name": "denismakogon/trove-guestagent",
"id": "d22a92a595fa0712f869ac1f042659fd9b4f0ff5",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_guestagent/common/rpc/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1023022"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from storyboard.stories.models import Comment
from storyboard.stories.models import Story
from storyboard.stories.models import StoryTag
from storyboard.stories.models import Task
# Expose the story-tracking models in the Django admin site.
for model in (Story, Task, Comment, StoryTag):
    admin.site.register(model)
| {
"content_hash": "d1afc4eff7f4d0b5a2450568a214fab3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.8440366972477065,
"repo_name": "Konovalov-Nik/storyboard",
"id": "d1686dcb67dde7ac5fd95d3755827fa2eaff18ff",
"size": "981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "storyboard/stories/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "6055"
},
{
"name": "Python",
"bytes": "45350"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import re
import requests
from requests_ntlm import HttpNtlmAuth
from bs4 import BeautifulSoup
# Endpoints of the uniFLOW print-accounting server.  The *_PATH values
# are appended to BASE_URL by the scrapers below.
BASE_URL = 'https://uniflow.calvin.edu/'
CLIENT_PATH = 'pwclient/'  # app serving the budget page
RQM_PATH = 'pwrqm/'  # app serving the print-queue page
AUTH_PATH = 'getuserid.asp'  # NTLM sign-in endpoint under either app
PRINT_BUDGET_PATH = 'dispBudget.asp'  # budget display, under CLIENT_PATH
PRINT_QUEUE_PATH = 'dispObjects.asp'  # queue display, under RQM_PATH
def get_uniflow_client(username, password):
    """Return a client for the given credentials.

    Constructing the client verifies the credentials immediately, so this
    may raise InvalidCredentialsError or NetworkError.
    """
    client = _UniflowClient(username, password)
    return client
class _UniflowClient:
    """Facade over the budget and queue scrapers for one user."""

    def __init__(self, username, password):
        if not username:
            # blank user name leads to an invalid ntlm domain
            raise InvalidCredentialsError('User name must not be blank.')
        self._username = username
        self._password = password
        self._budget_scraper = _BudgetScraper()
        self._queue_scraper = _QueueScraper()
        # Sign in to both apps right away so bad credentials fail fast.
        self._budget_scraper.sign_in(username, password)
        self._queue_scraper.sign_in(username, password)

    def get_budget(self):
        """Return the user's current print budget."""
        scraper = self._budget_scraper
        scraper.sign_in(self._username, self._password)
        return scraper.fetch_data()

    def get_print_queue(self):
        """Return the user's pending jobs as a list of PrintJob tuples."""
        scraper = self._queue_scraper
        scraper.sign_in(self._username, self._password)
        return scraper.fetch_data()
# One scraped entry from the user's pending-print-job queue.
PrintJob = namedtuple(
    'PrintJob', 'name pages copies price printer_name date')
class _PrintScraper:
    """Parent class of BudgetScraper and QueueScraper.

    Subclasses must provide a ``_session`` attribute and implement
    ``update_token(text)`` to pull their session token out of the login
    response body.
    """

    def sign_in(self, path, username, password):
        """Authenticate against ``BASE_URL + path`` with NTLM.

        Raises NetworkError on any request failure and
        InvalidCredentialsError on a non-200 login response.
        """
        domain = ''  # domain intentionally left blank
        try:
            self._session.get(BASE_URL + path)
            self._session.auth = HttpNtlmAuth(domain + '\\' + username,
                                              password, self._session)
            post_data = {'theAction': 'ntlogin'}
            response = self._session.post(BASE_URL + path + AUTH_PATH,
                                          data=post_data)
        except requests.exceptions.RequestException as err:
            # RequestException is the base of ConnectionError and
            # HTTPError, so a single handler covers all request failures.
            raise NetworkError(err)
        if response.status_code != requests.codes.ok:
            raise InvalidCredentialsError
        self.update_token(response.text)
class _BudgetScraper(_PrintScraper):
    """Stores a session with print.calvin.edu/pwclient, and a token."""

    def __init__(self):
        self.path = CLIENT_PATH
        self._session = requests.Session()

    def update_token(self, text):
        """Extract the token embedded in the client page's script."""
        match = re.search(r"\.c_updateToken\(\"(.*)\"\)", text)
        if match is not None:
            self._token = match.group(1)
            return
        raise ScrapingError("No token found.")

    def sign_in(self, username, password):
        _PrintScraper.sign_in(self, self.path, username, password)

    def fetch_data(self):
        """Returns the budget of the user as a float.

        Raises NetworkError on request failures and ScrapingError when
        the page cannot be parsed.
        """
        query_parameters = {
            'mmtype': 'budget',
            'smtype': '',
            'token': self._token
        }
        try:
            response = self._session.get(BASE_URL + self.path + PRINT_BUDGET_PATH,
                                         params=query_parameters)
        except requests.exceptions.RequestException as err:
            # RequestException covers ConnectionError and HTTPError too.
            raise NetworkError(err)
        if response.status_code != requests.codes.ok:
            raise ScrapingError("Invalid HTTP status code: {}".format(response.status_code))
        soup = BeautifulSoup(response.text, 'lxml')
        title = soup.find('title')
        if title is None:
            raise ScrapingError("Page has no title.")
        budget_tag = soup.find('font', class_='editHeadline')
        if budget_tag is None:
            # Raise a scraping error instead of an AttributeError below.
            raise ScrapingError("Budget not found.")
        # Reuse the tag already found rather than searching the soup again.
        match = re.search(r"Your current budget is:", str(budget_tag.parent))
        if match is None:
            raise ScrapingError("Budget not found.")
        try:
            budget = float(budget_tag.string)
        except ValueError:
            raise ScrapingError("Budget is not a valid float: {}".format(budget_tag.string))
        return budget
class _QueueScraper(_PrintScraper):
    """Stores a session with print.calvin.edu/pwrqm, and a token."""

    def __init__(self):
        self.path = RQM_PATH
        self._session = requests.Session()

    def sign_in(self, username, password):
        _PrintScraper.sign_in(self, self.path, username, password)

    def update_token(self, text):
        """Extract the token from the RQM login response."""
        match = re.search(r"token=(.*)\";", text)
        if match is not None:
            self._token = match.group(1)
            return
        raise ScrapingError("No token found.")

    def fetch_data(self):
        """Returns a list of PrintJob objects to represent a user's print queue."""
        query_parameters = {
            'mmtype': 'login',
            'smtype': '',
            'token': self._token
        }
        try:
            response = self._session.get(BASE_URL + self.path + PRINT_QUEUE_PATH,
                                         params=query_parameters)
        except requests.exceptions.RequestException as err:
            # RequestException covers ConnectionError and HTTPError too.
            raise NetworkError(err)
        if response.status_code != requests.codes.ok:
            raise ScrapingError("Invalid HTTP status code: {}".format(response.status_code))
        soup = BeautifulSoup(response.text, 'lxml')
        # TODO: Come up with a better way for checking for scraping errors
        title = soup.find('title')
        if title is None:
            raise ScrapingError("Page has no title.")
        print_job_tags = soup.select('#divMain tr td.Middle')
        print_jobs = []
        # Each job occupies seven consecutive table cells.  Floor division
        # keeps the count an int under Python 3 as well (plain `/` would
        # yield a float and break range()).
        number_of_jobs = len(print_job_tags) // 7
        for i in range(number_of_jobs):
            j = i * 7
            try:
                name = unicode(print_job_tags[0 + j].string)
                pages = int(print_job_tags[1 + j].string)
                copies = int(print_job_tags[2 + j].string)
                price = float(print_job_tags[3 + j].string)
                printer_name = unicode(print_job_tags[4 + j].string)
                # Cell 5 is skipped; cell 6 holds the submission date.
                date = unicode(print_job_tags[6 + j].string)
            except (IndexError, ValueError) as err:
                raise ScrapingError(err)
            print_jobs.append(PrintJob(name, pages, copies, price, printer_name, date))
        return print_jobs
class ScrapingError(Exception):
    """Raised when an expected value cannot be parsed from a uniFLOW page."""
    pass
class NetworkError(Exception):
    """Raised when a request to the uniFLOW server fails."""
    pass
class InvalidCredentialsError(Exception):
    """Raised when the supplied user name/password are rejected or blank."""
    pass
| {
"content_hash": "61400f066a2b1af21e51c9981d8fba9c",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 92,
"avg_line_length": 36.83157894736842,
"alnum_prop": 0.5923120891683338,
"repo_name": "beni55/calvinwebprint",
"id": "582cfc9eec450ca4da04968905f5db82c2744192",
"size": "6998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/printapp/printstatus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1215"
},
{
"name": "HTML",
"bytes": "22366"
},
{
"name": "JavaScript",
"bytes": "20246"
},
{
"name": "Python",
"bytes": "64899"
}
],
"symlink_target": ""
} |
import logging
import time
import traceback
from datetime import datetime, timedelta
import requests
import simplejson as json
from django.conf import settings
from django.core.management.base import BaseCommand
from redis import ConnectionPool, StrictRedis
from sutrofm.redis_models import Party, Message
# Shared pool for all redis connections opened by this command.
redis_connection_pool = ConnectionPool(**settings.WS4REDIS_CONNECTION)
logger = logging.getLogger(__name__)
# Grace period for an empty party.  NOTE(review): not referenced in the
# visible code of this module -- confirm whether it is still used.
WAIT_FOR_USERS = timedelta(minutes=5)
class Command(BaseCommand):
    """Per-party "master" daemon.

    Polls the party state in redis about once a second, advances the
    playing track when it ends or is skipped, broadcasts state to
    listeners, and stops once the room has no active users.
    """

    def add_arguments(self, parser):
        parser.add_argument('room_id', type=str)

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        self.redis = None
        self.party = None
        self.party_id = None
        self.currently_playing = None       # key of the track on air, if any
        self.current_track_duration = None  # seconds, from the rdio API
        self.current_start_time = None      # UTC datetime the track started
        self.keep_running = True

    def handle(self, room_id, *args, **kwargs):
        """Management-command entry point: bind to the party and loop."""
        self.party_id = room_id
        self.redis = StrictRedis(connection_pool=redis_connection_pool)
        self.party = Party.get(self.redis, room_id)
        self.currently_playing = None
        self.current_track_duration = None
        self.current_start_time = None
        self.play_track(self.party.playing_track_key)
        self.run()

    def run(self):
        """Tick once a second until tick() reports nothing left to do."""
        logger.debug('Starting up master process for party "%s"!', self.party_id)
        while self.keep_running:
            try:
                self.keep_running = self.tick()
            except Exception as ex:
                # Log and keep the daemon alive; one bad tick should not
                # kill the party.  Use the print *function* (the original
                # `print ex` statement is invalid under Python 3).
                print(ex)
                print(traceback.format_exc())
                logger.exception("!!! ALERT !!! Master... More like Crashster.")
            time.sleep(1)

    def get_duration(self, track_key):
        """Return the track's duration in seconds via the rdio web API."""
        response = requests.post('https://services.rdio.com/api/1/get', {
            'keys': track_key,
            'method': 'get',
            'access_token': settings.RDIO_ACCESS_TOKEN
        })
        return json.loads(response.text)['result'][track_key]['duration']

    def play_track(self, track_key):
        """Point local playback state at track_key, or clear it if falsy."""
        self.current_track_duration = None
        self.current_start_time = None
        self.currently_playing = None
        if track_key:
            self.currently_playing = track_key
            self.current_track_duration = self.get_duration(track_key)
            self.current_start_time = self.party.playing_track_start_time

    def play_next_track(self):
        """Advance the party's queue and broadcast the new player state."""
        self.party.play_next_track()
        self.party.save(self.redis)
        was_playing = self.currently_playing
        self.play_track(self.party.playing_track_key)
        if was_playing != self.currently_playing:
            # Only announce a change of track, not a no-op refresh.
            self.send_play_track_message(self.currently_playing)
        self.party.broadcast_player_state(self.redis)
        self.party.broadcast_queue_state(self.redis)

    def send_play_track_message(self, rdio_track_key):
        """Post a "now playing" message to the party's chat stream."""
        message = Message.make_now_playing_message(self.redis, self.party, rdio_track_key)
        message.save(self.redis)
        self.party.broadcast_message_added(self.redis, message)

    def tick(self):
        """One scheduler step; returns truthy while the daemon should go on."""
        # Refresh the party data
        self.party = Party.get(self.redis, self.party_id)
        position = (datetime.utcnow() - (self.current_start_time or datetime.utcnow())).seconds
        if (not self.currently_playing) or (position > self.current_track_duration) or self.party.should_skip():
            self.play_next_track()
        self.party.broadcast_user_list_state(self.redis)
        return self.should_keep_running()

    def should_keep_running(self):
        """ Kill if no one is online in the room any more """
        return len(self.party.active_users())
| {
"content_hash": "c8b22f784d646cb7316e934ef2d6c6ab",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 108,
"avg_line_length": 32.15094339622642,
"alnum_prop": 0.6948356807511737,
"repo_name": "superemily/sutrofm",
"id": "eb7eb557f35eede79bdb999d44fc00f0dbe79624",
"size": "3408",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sutrofm/management/commands/master.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11614"
},
{
"name": "HTML",
"bytes": "19771"
},
{
"name": "JavaScript",
"bytes": "57695"
},
{
"name": "Python",
"bytes": "51885"
},
{
"name": "Shell",
"bytes": "813"
}
],
"symlink_target": ""
} |
# Driver for the ANTLR "tweak" example: lex and parse the file named on
# the command line, then print the rewritten token stream.
import sys
import antlr3
from TLexer import TLexer
from TParser import TParser
cStream = antlr3.FileStream(sys.argv[1])  # character stream from the input file
lexer = TLexer(cStream)
# TokenRewriteStream records edits made by the parser's rewrite actions.
tStream = antlr3.TokenRewriteStream(lexer)
parser = TParser(tStream)
parser.program()
# Printing the stream emits the source with rewrites applied.
# (Python 2 print statement; this ANTLR runtime targets Python 2.)
print tStream
| {
"content_hash": "0ab23e28fb3fb7b590f62d3369af9d8c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 42,
"avg_line_length": 22.272727272727273,
"alnum_prop": 0.8040816326530612,
"repo_name": "Sable/mclab-core",
"id": "66424d4c02d77d575f8225bc4db2dbbbdb417465",
"size": "245",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/antlr-3.0.1/runtime/Python/examples/tweak/tweak.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "387"
},
{
"name": "Batchfile",
"bytes": "559"
},
{
"name": "C",
"bytes": "466526"
},
{
"name": "C++",
"bytes": "19994"
},
{
"name": "CSS",
"bytes": "3032"
},
{
"name": "Emacs Lisp",
"bytes": "2535"
},
{
"name": "GAP",
"bytes": "320936"
},
{
"name": "HTML",
"bytes": "359828"
},
{
"name": "Java",
"bytes": "5242198"
},
{
"name": "Lex",
"bytes": "114133"
},
{
"name": "M4",
"bytes": "3863"
},
{
"name": "Makefile",
"bytes": "5969"
},
{
"name": "Matlab",
"bytes": "1207"
},
{
"name": "OCaml",
"bytes": "6276"
},
{
"name": "Objective-C",
"bytes": "723006"
},
{
"name": "Python",
"bytes": "569613"
},
{
"name": "Ruby",
"bytes": "21165"
},
{
"name": "Shell",
"bytes": "3574"
},
{
"name": "Smalltalk",
"bytes": "417"
},
{
"name": "VimL",
"bytes": "5978"
},
{
"name": "Yacc",
"bytes": "3743"
}
],
"symlink_target": ""
} |
# Build the 52 standard playing cards as "value of suit" strings.
# The card values must be string literals -- the original bare names
# (ace, two, ...) were undefined and raised NameError.
list_of_values = ["ace", "two", "three", "four", "five", "six", "seven",
                  "eight", "nine", "ten", "jack", "queen", "king"]
list_of_suites = ["hearts", "spades", "diamonds", "clubs"]
k = len(list_of_values)  # 13 values per suit
s = len(list_of_suites)  # 4 suits
list_of_cards = []
for p in list_of_suites:
    for q in list_of_values:
        # append one joined description per card; the original extend()
        # spliced the three tuple pieces into the list separately.
        list_of_cards.append("{} of {}".format(q, p))
| {
"content_hash": "a351493b272e3eabe0e978f4001d9eb5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6355421686746988,
"repo_name": "jeremiahmarks/dangerzone",
"id": "0092f988145f9b0b2376747a374b80854d1d32b7",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/python/cards/cardsz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "HTML",
"bytes": "2728663"
},
{
"name": "Java",
"bytes": "18658"
},
{
"name": "JavaScript",
"bytes": "4591"
},
{
"name": "PHP",
"bytes": "61100"
},
{
"name": "Python",
"bytes": "419882"
},
{
"name": "Ruby",
"bytes": "126786"
},
{
"name": "Shell",
"bytes": "130622"
}
],
"symlink_target": ""
} |
"""Concrete-table (table-per-class) inheritance example."""
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import String
from sqlalchemy.ext.declarative import ConcreteBase
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import with_polymorphic
# Declarative base shared by every mapped class in this example.
Base = declarative_base()
class Company(Base):
    """A company holding a collection of employees of mixed concrete types."""

    __tablename__ = "company"
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    # delete-orphan: removing an employee from this list (or deleting the
    # company) deletes the corresponding Person row.
    employees = relationship(
        "Person", back_populates="company", cascade="all, delete-orphan"
    )

    def __repr__(self):
        return "Company %s" % self.name
class Person(ConcreteBase, Base):
    """Base of the employee hierarchy.

    ConcreteBase configures concrete-table inheritance: each subclass maps
    to its own standalone table, and polymorphic loads of Person use a
    UNION across them.
    """

    __tablename__ = "person"
    id = Column(Integer, primary_key=True)
    company_id = Column(ForeignKey("company.id"))
    name = Column(String(50))
    company = relationship("Company", back_populates="employees")

    __mapper_args__ = {"polymorphic_identity": "person"}

    def __repr__(self):
        return "Ordinary person %s" % self.name
class Engineer(Person):
    """Concrete subclass of Person.

    All Person columns are repeated here because, with ``"concrete": True``,
    this table stands alone rather than joining to ``person``.
    """

    __tablename__ = "engineer"
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    company_id = Column(ForeignKey("company.id"))
    status = Column(String(30))
    engineer_name = Column(String(30))
    primary_language = Column(String(30))
    company = relationship("Company", back_populates="employees")

    __mapper_args__ = {"polymorphic_identity": "engineer", "concrete": True}

    def __repr__(self):
        return (
            "Engineer %s, status %s, engineer_name %s, "
            "primary_language %s"
            % (
                self.name,
                self.status,
                self.engineer_name,
                self.primary_language,
            )
        )
class Manager(Person):
    """Concrete subclass of Person; see Engineer for the mapping pattern."""

    __tablename__ = "manager"
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    company_id = Column(ForeignKey("company.id"))
    status = Column(String(30))
    manager_name = Column(String(30))
    company = relationship("Company", back_populates="employees")

    __mapper_args__ = {"polymorphic_identity": "manager", "concrete": True}

    def __repr__(self):
        return "Manager %s, status %s, manager_name %s" % (
            self.name,
            self.status,
            self.manager_name,
        )
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)

session = Session(engine)

# Populate one company with managers, engineers and a plain person.
c = Company(
    name="company1",
    employees=[
        Manager(
            name="pointy haired boss", status="AAB", manager_name="manager1"
        ),
        Engineer(
            name="dilbert",
            status="BBA",
            engineer_name="engineer1",
            primary_language="java",
        ),
        Person(name="joesmith"),
        Engineer(
            name="wally",
            status="CGG",
            engineer_name="engineer2",
            primary_language="python",
        ),
        Manager(name="jsmith", status="ABA", manager_name="manager2"),
    ],
)
session.add(c)
session.commit()

# Loading the collection returns each employee as its mapped subclass.
c = session.query(Company).get(1)
for e in c.employees:
    print(e, inspect(e).key, e.company)
assert set([e.name for e in c.employees]) == set(
    ["pointy haired boss", "dilbert", "joesmith", "wally", "jsmith"]
)
print("\n")

# Querying via the base class or the concrete subclass yields the same
# identity-mapped object.
dilbert = session.query(Person).filter_by(name="dilbert").one()
dilbert2 = session.query(Engineer).filter_by(name="dilbert").one()
assert dilbert is dilbert2

dilbert.engineer_name = "hes dilbert!"
session.commit()

c = session.query(Company).get(1)
for e in c.employees:
    print(e)

# query using with_polymorphic.
eng_manager = with_polymorphic(Person, [Engineer, Manager])
print(
    session.query(eng_manager)
    .filter(
        or_(
            eng_manager.Engineer.engineer_name == "engineer1",
            eng_manager.Manager.manager_name == "manager2",
        )
    )
    .all()
)

# illustrate join from Company
eng_manager = with_polymorphic(Person, [Engineer, Manager])
print(
    session.query(Company)
    .join(Company.employees.of_type(eng_manager))
    .filter(
        or_(
            eng_manager.Engineer.engineer_name == "engineer1",
            eng_manager.Manager.manager_name == "manager2",
        )
    )
    .all()
)

session.commit()
| {
"content_hash": "c470666bc1b155d7f8960aaed02d2c63",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 76,
"avg_line_length": 26.07017543859649,
"alnum_prop": 0.6211305518169583,
"repo_name": "j5int/sqlalchemy",
"id": "4eb89984a0b6336d3c654cc7c165e71752566a67",
"size": "4458",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/inheritance/concrete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "63151"
},
{
"name": "Python",
"bytes": "15339979"
}
],
"symlink_target": ""
} |
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling3D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import convolutional as tf_convolutional_layers
class Conv1D(tf_convolutional_layers.Conv1D, Layer):
  """1D convolution layer (e.g. temporal convolution).

  This layer creates a convolution kernel that is convolved
  with the layer input over a single spatial (or temporal) dimension
  to produce a tensor of outputs.
  If `use_bias` is True, a bias vector is created and added to the outputs.
  Finally, if `activation` is not `None`,
  it is applied to the outputs as well.

  When using this layer as the first layer in a model,
  provide an `input_shape` argument
  (tuple of integers or `None`, e.g.
  `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
  or `(None, 128)` for variable-length sequences of 128-dimensional vectors.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number output of filters in the convolution).
      kernel_size: An integer or tuple/list of a single integer,
          specifying the length of the 1D convolution window.
      strides: An integer or tuple/list of a single integer,
          specifying the stride length of the convolution.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
          `"causal"` results in causal (dilated) convolutions, e.g. output[t]
          does not depend on input[t+1:]. Useful when modeling temporal data
          where the model should not violate the temporal order.
          See [WaveNet: A Generative Model for Raw Audio, section
            2.1](https://arxiv.org/abs/1609.03499).
      dilation_rate: an integer or tuple/list of a single integer, specifying
          the dilation rate to use for dilated convolution.
          Currently, specifying any `dilation_rate` value != 1 is
          incompatible with specifying any `strides` value != 1.
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation")..
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      3D tensor with shape: `(batch_size, steps, input_dim)`

  Output shape:
      3D tensor with shape: `(batch_size, new_steps, filters)`
      `steps` value might have changed due to padding or strides.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Keras-level strings (activations/initializers/regularizers) are
    # deserialized here before delegating to the tf.layers implementation.
    super(Conv1D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        # data_format is fixed: the 1D layer only supports channels_last.
        data_format='channels_last',
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    super(Conv1D, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    # Constraints can only be registered after build() creates the weights.
    if self.kernel_constraint:
      self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint

  def get_config(self):
    """Returns the layer configuration as a JSON-serializable dict.

    data_format is omitted on purpose: it is hard-coded to channels_last
    in __init__.
    """
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv1D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Conv2D(tf_convolutional_layers.Conv2D, Layer):
  """2D convolution layer (e.g. spatial convolution over images).

  This layer creates a convolution kernel that is convolved
  with the layer input to produce a tensor of
  outputs. If `use_bias` is True,
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
  in `data_format="channels_last"`.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number output of filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      dilation_rate: an integer or tuple/list of 2 integers, specifying
          the dilation rate to use for dilated convolution.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Currently, specifying any `dilation_rate` value != 1 is
          incompatible with specifying any stride value != 1.
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation")..
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    if data_format is None:
      # Fall back to the globally configured Keras image data format.
      data_format = K.image_data_format()
    # Keras-level strings (activations/initializers/regularizers) are
    # deserialized here before delegating to the tf.layers implementation.
    super(Conv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    super(Conv2D, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    # Constraints can only be registered after build() creates the weights.
    if self.kernel_constraint:
      self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint

  def get_config(self):
    """Returns the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Conv3D(tf_convolutional_layers.Conv3D, Layer):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(Conv3D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
# TODO(fchollet): move weight constraint support to core layers.
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
  def build(self, input_shape):
    """Create the layer's variables, then register weight constraints.

    Args:
      input_shape: Shape of the input, including the channel dimension.
    """
    super(Conv3D, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    # `self.kernel` / `self.bias` are created by the parent `build` above;
    # the bias constraint is only registered when a bias is actually used.
    if self.kernel_constraint:
      self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Conv2DTranspose(tf_convolutional_layers.Conv2DTranspose, Layer):
  """Transposed convolution layer (sometimes called Deconvolution).

  A transposed convolution swaps the forward and backward passes of a
  regular convolution: it maps from something shaped like a convolution's
  output back to something shaped like its input, while keeping a
  connectivity pattern compatible with that convolution.

  When using this layer as the first layer in a model, provide the keyword
  argument `input_shape` (tuple of integers, not including the sample
  axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
  `data_format="channels_last"`.

  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      width and height of the 2D convolution window. A single integer
      applies to both spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides
      of the convolution along the width and height. A single integer
      applies to both spatial dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json` ("channels_last" if never set).
    activation: Activation function to use. If you don't specify anything,
      no activation is applied (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to the `kernel`
      weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of
      the layer (its "activation").
    kernel_constraint: Constraint function applied to the kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    4D tensor with shape `(batch, channels, rows, cols)` if
    data_format='channels_first', or `(batch, rows, cols, channels)` if
    data_format='channels_last'.

  Output shape:
    4D tensor with shape `(batch, filters, new_rows, new_cols)` if
    data_format='channels_first', or `(batch, new_rows, new_cols, filters)`
    if data_format='channels_last'. `rows` and `cols` values might have
    changed due to padding.

  References:
    - [A guide to convolution arithmetic for deep
      learning](https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional
      Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve the data format from the global Keras config when unset.
    resolved_format = K.image_data_format() if data_format is None else data_format
    super(Conv2DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=resolved_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    """Create the layer's variables, then register weight constraints."""
    super(Conv2DTranspose, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    if self.kernel_constraint:
      self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = super(Conv2DTranspose, self).get_config()
    config.update({
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    })
    return config
class Conv3DTranspose(tf_convolutional_layers.Conv3D, Layer):
  """Transposed convolution layer (sometimes called Deconvolution).

  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.

  NOTE(review): this class inherits from `tf_convolutional_layers.Conv3D`
  (a forward 3D convolution), not a transposed-convolution base, and no
  `call` override is visible in this class -- confirm where the transposed
  behavior is actually implemented.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3
  channels if `data_format="channels_last"`.

  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 3 integers,
      specifying the strides of the convolution along each spatial
      dimension. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 3 integers, specifying
      the dilation rate to use for dilated convolution.
      NOTE(review): documented here but not accepted by `__init__` below --
      verify against callers.
    activation: Activation function to use
      (see [activations](../activations.md)).
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix
      (see [initializers](../initializers.md)).
    bias_initializer: Initializer for the bias vector
      (see [initializers](../initializers.md)).
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix
      (see [regularizer](../regularizers.md)).
    bias_regularizer: Regularizer function applied to the bias vector
      (see [regularizer](../regularizers.md)).
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation")
      (see [regularizer](../regularizers.md)).
    kernel_constraint: Constraint function applied to the kernel matrix
      (see [constraints](../constraints.md)).
    bias_constraint: Constraint function applied to the bias vector
      (see [constraints](../constraints.md)).

  Input shape:
    5D tensor with shape:
    `(batch, channels, depth, rows, cols)` if data_format='channels_first'
    or 5D tensor with shape:
    `(batch, depth, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
    5D tensor with shape:
    `(batch, filters, new_depth, new_rows, new_cols)` if
      data_format='channels_first'
    or 5D tensor with shape:
    `(batch, new_depth, new_rows, new_cols, filters)` if
      data_format='channels_last'.
    `depth` and `rows` and `cols` values might have changed due to padding.

  References:
    - [A guide to convolution arithmetic for deep
      learning](https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional
      Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Fall back to the globally configured image data format when unset.
    if data_format is None:
      data_format = K.image_data_format()
    # Identifier-style arguments are deserialized via their registries
    # before being forwarded to the underlying layer class.
    super(Conv3DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    """Create the layer's variables, then register weight constraints."""
    super(Conv3DTranspose, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    # The bias constraint only applies when a bias vector exists.
    if self.kernel_constraint:
      self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv3DTranspose, self).get_config()
    # Subclass keys (second list) take precedence over base keys.
    return dict(list(base_config.items()) + list(config.items()))
class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer):
  """Depthwise separable 2D convolution.

  A separable convolution first performs a depthwise spatial convolution
  (acting on each input channel separately), then a pointwise convolution
  that mixes the resulting output channels. The `depth_multiplier`
  argument controls how many output channels the depthwise step generates
  per input channel.

  Intuitively, a separable convolution factorizes a convolution kernel
  into two smaller kernels; it can also be seen as an extreme version of
  an Inception block.

  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      width and height of the 2D convolution window. A single integer
      applies to both spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the
      strides of the convolution along the width and height. A single
      integer applies to both spatial dimensions. Specifying any stride
      value != 1 is incompatible with specifying any `dilation_rate`
      value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json` ("channels_last" if never set).
    depth_multiplier: The number of depthwise convolution output channels
      for each input channel. The total number of depthwise convolution
      output channels equals `filters_in * depth_multiplier`.
    activation: Activation function to use. If you don't specify anything,
      no activation is applied (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    pointwise_initializer: Initializer for the pointwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: Regularizer function applied to the depthwise
      kernel matrix.
    pointwise_regularizer: Regularizer function applied to the pointwise
      kernel matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of
      the layer (its "activation").
    depthwise_constraint: Constraint function applied to the depthwise
      kernel matrix.
    pointwise_constraint: Constraint function applied to the pointwise
      kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    4D tensor with shape `(batch, channels, rows, cols)` if
    data_format='channels_first', or `(batch, rows, cols, channels)` if
    data_format='channels_last'.

  Output shape:
    4D tensor with shape `(batch, filters, new_rows, new_cols)` if
    data_format='channels_first', or `(batch, new_rows, new_cols, filters)`
    if data_format='channels_last'. `rows` and `cols` values might have
    changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve the data format from the global Keras config when unset.
    resolved_format = K.image_data_format() if data_format is None else data_format
    super(SeparableConv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=resolved_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        depthwise_initializer=initializers.get(depthwise_initializer),
        pointwise_initializer=initializers.get(pointwise_initializer),
        bias_initializer=initializers.get(bias_initializer),
        depthwise_regularizer=regularizers.get(depthwise_regularizer),
        pointwise_regularizer=regularizers.get(pointwise_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.pointwise_constraint = constraints.get(pointwise_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    """Create the layer's variables, then register weight constraints."""
    super(SeparableConv2D, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    if self.depthwise_constraint:
      self.constraints[self.depthwise_kernel] = self.depthwise_constraint
    if self.pointwise_constraint:
      self.constraints[self.pointwise_kernel] = self.pointwise_constraint
    if self.use_bias and self.bias_constraint:
      self.constraints[self.bias] = self.bias_constraint

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = super(SeparableConv2D, self).get_config()
    config.update({
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'depthwise_initializer': initializers.serialize(
            self.depthwise_initializer),
        'pointwise_initializer': initializers.serialize(
            self.pointwise_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'depthwise_regularizer': regularizers.serialize(
            self.depthwise_regularizer),
        'pointwise_regularizer': regularizers.serialize(
            self.pointwise_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'depthwise_constraint': constraints.serialize(
            self.depthwise_constraint),
        'pointwise_constraint': constraints.serialize(
            self.pointwise_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    })
    return config
class UpSampling1D(Layer):
  """Upsampling layer for 1D inputs.

  Repeats each temporal step `size` times along the time axis.

  Arguments:
    size: integer. Upsampling factor.

  Input shape:
    3D tensor with shape: `(batch, steps, features)`.

  Output shape:
    3D tensor with shape: `(batch, upsampled_steps, features)`.
  """

  def __init__(self, size=2, **kwargs):
    super(UpSampling1D, self).__init__(**kwargs)
    # Coerce to int so serialized configs round-trip cleanly.
    self.size = int(size)
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    """Return the upsampled shape; an unknown time dim stays unknown."""
    dims = tensor_shape.TensorShape(input_shape).as_list()
    steps = dims[1]
    upsampled = None if steps is None else steps * self.size
    return tensor_shape.TensorShape([dims[0], upsampled, dims[2]])

  def call(self, inputs):
    # Repeat every step `size` times along the time axis (axis 1).
    return K.repeat_elements(inputs, self.size, axis=1)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = super(UpSampling1D, self).get_config()
    config.update({'size': self.size})
    return config
class UpSampling2D(Layer):
  """Upsampling layer for 2D inputs.

  Repeats the rows and columns of the data by size[0] and size[1]
  respectively.

  Arguments:
    size: int, or tuple of 2 integers.
      The upsampling factors for rows and columns.
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json` ("channels_last" if never set).

  Input shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, rows, cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, rows, cols)`

  Output shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, upsampled_rows, upsampled_cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, upsampled_rows, upsampled_cols)`
  """

  def __init__(self, size=(2, 2), data_format=None, **kwargs):
    super(UpSampling2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Return the upsampled shape; unknown spatial dims stay unknown."""
    dims = tensor_shape.TensorShape(input_shape).as_list()

    def _scaled(dim, factor):
      return None if dim is None else dim * factor

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [dims[0], dims[1],
           _scaled(dims[2], self.size[0]),
           _scaled(dims[3], self.size[1])])
    else:
      return tensor_shape.TensorShape(
          [dims[0],
           _scaled(dims[1], self.size[0]),
           _scaled(dims[2], self.size[1]),
           dims[3]])

  def call(self, inputs):
    rows, cols = self.size
    return K.resize_images(inputs, rows, cols, self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = super(UpSampling2D, self).get_config()
    config.update({'size': self.size, 'data_format': self.data_format})
    return config
class UpSampling3D(Layer):
  """Upsampling layer for 3D inputs.

  Repeats the 1st, 2nd and 3rd dimensions of the data by size[0], size[1]
  and size[2] respectively.

  Arguments:
    size: int, or tuple of 3 integers.
      The upsampling factors for dim1, dim2 and dim3.
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
      `channels_first` to
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value found in your Keras config
      file at `~/.keras/keras.json` ("channels_last" if never set).

  Input shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, dim1, dim2, dim3, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, dim1, dim2, dim3)`

  Output shape:
    5D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
  """

  def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
    # Call the base constructor before assigning attributes, consistent
    # with UpSampling1D / UpSampling2D (previously the order was reversed).
    super(UpSampling3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 3, 'size')
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    """Return the upsampled shape; unknown spatial dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      dim1 = self.size[0] * input_shape[
          2] if input_shape[2] is not None else None
      dim2 = self.size[1] * input_shape[
          3] if input_shape[3] is not None else None
      dim3 = self.size[2] * input_shape[
          4] if input_shape[4] is not None else None
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], dim1, dim2, dim3])
    else:
      dim1 = self.size[0] * input_shape[
          1] if input_shape[1] is not None else None
      dim2 = self.size[1] * input_shape[
          2] if input_shape[2] is not None else None
      dim3 = self.size[2] * input_shape[
          3] if input_shape[3] is not None else None
      return tensor_shape.TensorShape(
          [input_shape[0], dim1, dim2, dim3, input_shape[4]])

  def call(self, inputs):
    return K.resize_volumes(inputs, self.size[0], self.size[1], self.size[2],
                            self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {'size': self.size, 'data_format': self.data_format}
    base_config = super(UpSampling3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding1D(Layer):
  """Zero-padding layer for 1D input (e.g. temporal sequence).

  Arguments:
    padding: int, or tuple of int (length 2).
      - If int:
        How many zeros to add at the beginning and end of
        the padding dimension (axis 1).
      - If tuple of int (length 2):
        How many zeros to add at the beginning and at the end of
        the padding dimension (`(left_pad, right_pad)`).

  Input shape:
    3D tensor with shape `(batch, axis_to_pad, features)`

  Output shape:
    3D tensor with shape `(batch, padded_axis, features)`
  """

  def __init__(self, padding=1, **kwargs):
    super(ZeroPadding1D, self).__init__(**kwargs)
    # normalize_tuple expands a single int to (pad, pad).
    self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    """Return the padded shape; an unknown length stays unknown."""
    # Normalize to a plain list so unknown dims compare as None,
    # consistent with ZeroPadding2D / ZeroPadding3D.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if input_shape[1] is not None:
      # Asymmetric padding adds both the left and right amounts.
      length = input_shape[1] + self.padding[0] + self.padding[1]
    else:
      length = None
    return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])

  def call(self, inputs):
    return K.temporal_padding(inputs, padding=self.padding)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {'padding': self.padding}
    base_config = super(ZeroPadding1D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding2D(Layer):
  """Zero-padding layer for 2D input (e.g. picture).

  Adds rows and columns of zeros at the top, bottom, left and right side
  of an image tensor.

  Arguments:
    padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
      - If int: the same symmetric padding is applied to width and height.
      - If tuple of 2 ints: interpreted as two different symmetric padding
        values for height and width:
        `(symmetric_height_pad, symmetric_width_pad)`.
      - If tuple of 2 tuples of 2 ints: interpreted as
        `((top_pad, bottom_pad), (left_pad, right_pad))`
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json` ("channels_last" if never set).

  Input shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, rows, cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, rows, cols)`

  Output shape:
    4D tensor with shape:
    - If `data_format` is `"channels_last"`:
        `(batch, padded_rows, padded_cols, channels)`
    - If `data_format` is `"channels_first"`:
        `(batch, channels, padded_rows, padded_cols)`
  """

  def __init__(self, padding=(1, 1), data_format=None, **kwargs):
    super(ZeroPadding2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(padding, int):
      # A single int pads all four sides by the same amount.
      self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
      if len(padding) != 2:
        raise ValueError('`padding` should have two elements. '
                         'Found: ' + str(padding))
      rows_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                '1st entry of padding')
      cols_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                '2nd entry of padding')
      self.padding = (rows_padding, cols_padding)
    else:
      raise ValueError('`padding` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_pad, symmetric_width_pad), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_pad, bottom_pad), (left_pad, right_pad)). '
                       'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Return the padded shape; unknown spatial dims stay unknown."""
    dims = tensor_shape.TensorShape(input_shape).as_list()

    def _padded(dim, pads):
      # Asymmetric padding adds both sides of the pair.
      return None if dim is None else dim + pads[0] + pads[1]

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [dims[0], dims[1],
           _padded(dims[2], self.padding[0]),
           _padded(dims[3], self.padding[1])])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [dims[0],
           _padded(dims[1], self.padding[0]),
           _padded(dims[2], self.padding[1]),
           dims[3]])

  def call(self, inputs):
    return K.spatial_2d_padding(
        inputs, padding=self.padding, data_format=self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = super(ZeroPadding2D, self).get_config()
    config.update({'padding': self.padding, 'data_format': self.data_format})
    return config
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Arguments:
    padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
      - If int: the same symmetric padding
        is applied to all three spatial dimensions.
      - If tuple of 3 ints:
        interpreted as three different
        symmetric padding values, one per spatial dimension:
        `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
      - If tuple of 3 tuples of 2 ints:
        interpreted as
        `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
          right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_padded_axis, second_padded_axis, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis,
third_axis_to_pad)`
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
' (left_dim3_pad, right_dim2_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + 2 * self.padding[0][0]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + 2 * self.padding[1][0]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + 2 * self.padding[2][0]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + 2 * self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + 2 * self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + 2 * self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return K.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Cropping1D(Layer):
  """Cropping layer for 1D input (e.g. temporal sequence).

  Removes units from the beginning and the end of the time axis (axis 1).

  Arguments:
      cropping: int or tuple of int (length 2)
          Number of units to trim off at the beginning and end of
          the cropping dimension (axis 1).  A single int is used
          for both ends.

  Input shape:
      3D tensor with shape `(batch, axis_to_crop, features)`

  Output shape:
      3D tensor with shape `(batch, cropped_axis, features)`
  """

  def __init__(self, cropping=(1, 1), **kwargs):
    super(Cropping1D, self).__init__(**kwargs)
    # Normalize a scalar or 2-sequence into a canonical (begin, end) pair.
    self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    shape = tensor_shape.TensorShape(input_shape).as_list()
    time_dim = shape[1]
    if time_dim is not None:
      time_dim -= self.cropping[0] + self.cropping[1]
    return tensor_shape.TensorShape([shape[0], time_dim, shape[2]])

  def call(self, inputs):
    begin, end = self.cropping
    # A stop index of `-0` would produce an empty tensor, so only use a
    # negative stop when there is something to trim at the end.
    stop = -end if end else None
    return inputs[:, begin:stop, :]

  def get_config(self):
    base_config = super(Cropping1D, self).get_config()
    merged = dict(base_config)
    merged.update({'cropping': self.cropping})
    return merged
class Cropping2D(Layer):
  """Cropping layer for 2D input (e.g. picture).

  It crops along spatial dimensions, i.e. width and height.

  Arguments:
      cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
          - If int: the same symmetric cropping
              is applied to width and height.
          - If tuple of 2 ints:
              interpreted as two different
              symmetric cropping values for height and width:
              `(symmetric_height_crop, symmetric_width_crop)`.
          - If tuple of 2 tuples of 2 ints:
              interpreted as
              `((top_crop, bottom_crop), (left_crop, right_crop))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, rows, cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, rows, cols)`

  Output shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, cropped_rows, cropped_cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, cropped_rows, cropped_cols)`

  Examples:

  ```python
  # Crop the input 2D images or feature maps
  model = Sequential()
  model.add(Cropping2D(cropping=((2, 2), (4, 4)),
                       input_shape=(28, 28, 3)))
  # now model.output_shape == (None, 24, 20, 3)
  model.add(Conv2D(64, (3, 3), padding='same'))
  model.add(Cropping2D(cropping=((2, 2), (2, 2))))
  # now model.output_shape == (None, 20, 16, 64)
  ```
  """

  def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
    super(Cropping2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Normalize `cropping` to ((top, bottom), (left, right)) regardless of
    # which of the three accepted forms was passed.
    if isinstance(cropping, int):
      self.cropping = ((cropping, cropping), (cropping, cropping))
    elif hasattr(cropping, '__len__'):
      if len(cropping) != 2:
        raise ValueError('`cropping` should have two elements. '
                         'Found: ' + str(cropping))
      height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
                                                   '1st entry of cropping')
      width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
                                                  '2nd entry of cropping')
      self.cropping = (height_cropping, width_cropping)
    else:
      raise ValueError('`cropping` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_crop, symmetric_width_crop), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_crop, bottom_crop), (left_crop, right_crop)). '
                       'Found: ' + str(cropping))
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    # Each spatial dim shrinks by its total (begin + end) cropping; unknown
    # (None) dims stay unknown.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape([
          input_shape[0], input_shape[1],
          input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[2] else None,
          input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[3] else None
      ])
    else:
      return tensor_shape.TensorShape([
          input_shape[0],
          input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[1] else None,
          input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[2] else None, input_shape[3]
      ])
    # pylint: enable=invalid-unary-operand-type

  def call(self, inputs):
    # The branches exist because a slice stop of `-0` would yield an empty
    # tensor: each combination of "end crop is zero" needs an open-ended
    # slice instead of a negative stop index.
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
      elif self.cropping[0][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1]]
      elif self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:]
      return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1]]
    else:
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
      elif self.cropping[0][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1], :]
      elif self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:, :]
      return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
          1][0]:-self.cropping[1][1], :]  # pylint: disable=invalid-unary-operand-type
    # pylint: enable=invalid-unary-operand-type

  def get_config(self):
    # Serialize constructor arguments on top of the base Layer config.
    config = {'cropping': self.cropping, 'data_format': self.data_format}
    base_config = super(Cropping2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Cropping3D(Layer):
  """Cropping layer for 3D data (e.g. spatial or spatio-temporal).

  Arguments:
      cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
          - If int: the same symmetric cropping
              is applied to all three spatial dimensions.
          - If tuple of 3 ints:
              interpreted as three different
              symmetric cropping values:
              `(symmetric_dim1_crop, symmetric_dim2_crop,
                symmetric_dim3_crop)`.
          - If tuple of 3 tuples of 2 ints:
              interpreted as
              `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
                right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_axis_to_crop, second_axis_to_crop,
            third_axis_to_crop, depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_axis_to_crop, second_axis_to_crop,
            third_axis_to_crop)`

  Output shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_cropped_axis, second_cropped_axis,
            third_cropped_axis, depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_cropped_axis, second_cropped_axis,
            third_cropped_axis)`
  """

  def __init__(self,
               cropping=((1, 1), (1, 1), (1, 1)),
               data_format=None,
               **kwargs):
    super(Cropping3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Normalize `cropping` to ((begin1, end1), (begin2, end2),
    # (begin3, end3)) regardless of which accepted form was passed.
    if isinstance(cropping, int):
      self.cropping = ((cropping, cropping), (cropping, cropping),
                       (cropping, cropping))
    elif hasattr(cropping, '__len__'):
      if len(cropping) != 3:
        raise ValueError('`cropping` should have 3 elements. '
                         'Found: ' + str(cropping))
      dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
                                                 '1st entry of cropping')
      dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
                                                 '2nd entry of cropping')
      dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
                                                 '3rd entry of cropping')
      self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
    else:
      # Error message fixed: third tuple previously read `right_dim2_crop`.
      raise ValueError(
          '`cropping` should be either an int, '
          'a tuple of 3 ints '
          '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
          'or a tuple of 3 tuples of 2 ints '
          '((left_dim1_crop, right_dim1_crop),'
          ' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
          'Found: ' + str(cropping))
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    """Each spatial dimension shrinks by its total (begin + end) cropping."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    spatial_offset = 2 if self.data_format == 'channels_first' else 1
    cropped = []
    for i in range(3):
      dim = input_shape[spatial_offset + i]
      if dim is not None:
        dim = dim - self.cropping[i][0] - self.cropping[i][1]
      cropped.append(dim)
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1]] + cropped)
    return tensor_shape.TensorShape(
        [input_shape[0]] + cropped + [input_shape[4]])

  def call(self, inputs):
    # Build one slice per spatial axis: start at `begin` and stop `end`
    # elements before the axis end.  A literal stop of `-0` would produce
    # an empty tensor, so use None ("to the end") when end == 0.  This is
    # equivalent to the original 8-way branch over which `end` values are
    # zero, expressed as a single slicing expression.
    dim1, dim2, dim3 = [
        slice(begin, -end if end else None) for (begin, end) in self.cropping
    ]
    if self.data_format == 'channels_first':
      return inputs[:, :, dim1, dim2, dim3]
    return inputs[:, dim1, dim2, dim3, :]

  def get_config(self):
    # Serialize constructor arguments on top of the base Layer config.
    config = {'cropping': self.cropping, 'data_format': self.data_format}
    base_config = super(Cropping3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
# Aliases: backwards-compatible names kept for code written against the
# older Keras spelling of these layer classes.
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| {
"content_hash": "34cc941f26e7b7b80f73cbba685136f6",
"timestamp": "",
"source": "github",
"line_count": 1696,
"max_line_length": 104,
"avg_line_length": 44.26650943396226,
"alnum_prop": 0.6327055250679312,
"repo_name": "manazhao/tf_recsys",
"id": "b5e44c89c0892f28bc6ff90a29383361966fac1d",
"size": "75765",
"binary": false,
"copies": "7",
"ref": "refs/heads/r1.0",
"path": "tensorflow/contrib/keras/python/keras/layers/convolutional.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "193348"
},
{
"name": "C++",
"bytes": "27280674"
},
{
"name": "CMake",
"bytes": "177556"
},
{
"name": "Go",
"bytes": "929281"
},
{
"name": "Java",
"bytes": "333525"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "254489"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "23927341"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "337394"
},
{
"name": "Vim script",
"bytes": "2803"
}
],
"symlink_target": ""
} |
import testtools
from my_dev.tests import utils
from my_dev import users
from my_dev import ssh
class TestBase(testtools.TestCase):
    """Base class for functional tests: user/ssh fixture helpers."""

    def setUp(self):
        super(TestBase, self).setUp()
        self.user = users.Users()

    def create_user(self, username=None, password=None, email=None):
        """Create a user; any omitted field gets a fresh random value."""
        username = username if username else utils.rand_name('username')
        password = password if password else utils.rand_name('password')
        email = email if email else utils.rand_name('email')
        return self.user.create(username, password, email)

    def create_ssh(self, user_id, ssh_username=None, ssh_password=None,
                   host=None, alias=None):
        """Create an ssh entry for `user_id`.

        Any argument left as None gets a fresh random value per call.
        (The previous version used `utils.rand_name(...)` directly as
        default argument values; Python evaluates defaults once at class
        definition time, so every defaulted call silently reused the same
        "random" names.)
        """
        ssh_username = (ssh_username if ssh_username
                        else utils.rand_name('username'))
        ssh_password = (ssh_password if ssh_password
                        else utils.rand_name('password'))
        host = host if host else utils.rand_name('host')
        alias = alias if alias else utils.rand_name('alias')
        ssh_client = ssh.Ssh(user_id)
        return ssh_client.create(host=host, ssh_username=ssh_username,
                                 ssh_password=ssh_password, alias=alias)
| {
"content_hash": "80fb20818997ce7d7887473d4650c200",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 39.4,
"alnum_prop": 0.6355329949238578,
"repo_name": "esikachev/my-dev-client",
"id": "4d6b5b04664e8161c9a18372222900337b60163b",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_dev/tests/functional/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12160"
}
],
"symlink_target": ""
} |
"""Config controller. Other modules should `from yuno.core.config import config`
to get the settings as a global object. At some point early on, something
somewhere (normally yuno.py) needs to `load_default()` or `load_json()` to
load whatever settings it wants.
"""
import json
from yuno.core.errors import *
from yuno.core.util import decomment_json
class ConfigNamespace(object):
    """Container object for configuration settings.

    Attribute lookup on a key that was never set raises a descriptive
    UndefinedConfigKey (instead of a bare AttributeError), so people who
    delete a setting learn exactly which key the program was missing.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. the
        # setting was never assigned.
        raise UndefinedConfigKey(name)
def load_json(filename):
    """Add the contents of the JSON file to the config object, overwriting
    the existing key on name collisions.

    Raises ConfigLoadError if the file cannot be read, and ConfigParseError
    if its (comment-stripped) contents are not valid JSON.
    """
    try:
        # `with` guarantees the handle is closed even if parsing fails
        # later; the previous version only closed the file on success.
        with open(filename) as config_file:
            raw = config_file.read()
    except IOError:
        raise ConfigLoadError(filename)
    try:
        settings = json.loads(decomment_json(raw))
    except ValueError:
        raise ConfigParseError(filename)
    set_from_dict(settings)
def load_default():
    """Load the project's standard settings file (settings/config.json)."""
    load_json('settings/config.json')
def set_from_dict(dict_):
    """Apply every key/value pair from `dict_` to the global config."""
    for key in dict_:
        update(key, dict_[key])


def update(key, value):
    """Bind a single setting on the shared config object."""
    setattr(config, key, value)
# Run this out here so the first module to `import config` creates the real
# config object for others to import.  Python caches the module, so every
# later importer shares this same instance (a process-wide singleton).
config = ConfigNamespace()
| {
"content_hash": "d952b8ccdad439605633236f6a4e1380",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 27.145454545454545,
"alnum_prop": 0.6999330207635633,
"repo_name": "bulatb/yuno",
"id": "79759bcd18723d79f772a4426695e2cd1ae86e17",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yuno/core/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90590"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# URL routes for the databases app, resolved against
# aquaticore.databases.views: the index page and a per-database detail
# page keyed by an integer `database_id`.
urlpatterns = patterns('aquaticore.databases.views',
    (r'^$', 'index'),
    (r'^(?P<database_id>\d+)/$', 'detail'),
)
| {
"content_hash": "299dc843af81fcc2dbeb4bacfdbf2121",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 52,
"avg_line_length": 32,
"alnum_prop": 0.5208333333333334,
"repo_name": "rockerBOO/aquaticore",
"id": "a038a3d6842001d07eaa62d96d8c358bcb2d3ecc",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aquaticore/databases/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13106"
},
{
"name": "HTML",
"bytes": "44504"
},
{
"name": "JavaScript",
"bytes": "11994"
},
{
"name": "PHP",
"bytes": "17127"
},
{
"name": "Python",
"bytes": "51374"
}
],
"symlink_target": ""
} |
import pydev_log
import traceback
import pydevd_resolver
from pydevd_constants import * #@UnusedWildImport
from types import * #@UnusedWildImport
try:
from urllib import quote
except:
from urllib.parse import quote #@UnresolvedImport
try:
    from xml.sax.saxutils import escape

    def makeValidXmlValue(s):
        """Escape `s` so it can be embedded in a double-quoted XML attribute."""
        # escape() always converts &, < and >; the extra entity map also
        # converts double quotes, which would otherwise terminate the
        # attribute value.  (The replacement strings had been corrupted to
        # identity mappings, which produced invalid XML; restored here.)
        return escape(s, {'"': '&quot;'})
except:
    # Simple replacement if xml.sax is not there.  '&' is escaped first so
    # the entities introduced by the later replacements are not mangled.
    def makeValidXmlValue(s):
        return s.replace('&', '&amp;').replace('<', '&lt;').replace(
            '>', '&gt;').replace('"', '&quot;')
class ExceptionOnEvaluate:
    """Marker wrapping the result of an expression that failed to evaluate.

    varToXML checks `isinstance(val, ExceptionOnEvaluate)` and renders the
    wrapped result with isErrorOnEval="True".
    """
    def __init__(self, result):
        self.result = result

    def __repr__(self):
        # Added for debuggability when these wrappers appear in logs.
        return '%s(%r)' % (self.__class__.__name__, self.result)
#------------------------------------------------------------------------------------------------------ resolvers in map
# typeMap pairs each known concrete type with the resolver used to expand
# it in the variables view.  It is built once at import time, per platform.
if not sys.platform.startswith("java"):
    typeMap = [
        #None means that it should not be treated as a compound variable
        #isinstance does not accept a tuple on some versions of python, so, we must declare it expanded
        (type(None), None,),
        (int, None),
        (float, None),
        (complex, None),
        (str, None),
        (tuple, pydevd_resolver.tupleResolver),
        (list, pydevd_resolver.tupleResolver),
        (dict, pydevd_resolver.dictResolver),
    ]
    # The following names only exist on some Python versions (e.g. `long`
    # and `unicode` are Python 2 only), so each append is guarded.
    try:
        typeMap.append((long, None))
    except:
        pass #not available on all python versions
    try:
        typeMap.append((unicode, None))
    except:
        pass #not available on all python versions
    try:
        typeMap.append((set, pydevd_resolver.setResolver))
    except:
        pass #not available on all python versions
    try:
        typeMap.append((frozenset, pydevd_resolver.setResolver))
    except:
        pass #not available on all python versions
else: #platform is java
    # Jython exposes its own boxed types under org.python.core.
    from org.python import core #@UnresolvedImport
    typeMap = [
        (core.PyNone, None),
        (core.PyInteger, None),
        (core.PyLong, None),
        (core.PyFloat, None),
        (core.PyComplex, None),
        (core.PyString, None),
        (core.PyTuple, pydevd_resolver.tupleResolver),
        (core.PyList, pydevd_resolver.tupleResolver),
        (core.PyDictionary, pydevd_resolver.dictResolver),
        (core.PyStringMap, pydevd_resolver.dictResolver),
    ]
    if hasattr(core, 'PyJavaInstance'):
        #Jython 2.5b3 removed it.
        typeMap.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
def getType(o):
    """ returns a triple (typeObject, typeString, resolver)

    resolver != None means that variable is a container,
    and should be displayed as a hierarchy.

    Use the resolver to get its attributes.

    All container objects should have a resolver.
    """
    try:
        type_object = type(o)
        type_name = type_object.__name__
    except:
        #This happens for org.python.core.InitModule -- even type() can
        #fail on some exotic proxy objects, so return placeholders.
        return 'Unable to get Type', 'Unable to get Type', None
    try:
        # Jython-specific proxy types are matched by name because they are
        # not listed in typeMap.
        if type_name == 'org.python.core.PyJavaInstance':
            return type_object, type_name, pydevd_resolver.instanceResolver
        if type_name == 'org.python.core.PyArray':
            return type_object, type_name, pydevd_resolver.jyArrayResolver
        # First typeMap entry that matches wins; entries with a None
        # resolver are simple (non-container) values.
        for t in typeMap:
            if isinstance(o, t[0]):
                return type_object, type_name, t[1]
    except:
        traceback.print_exc()
    #no match found: fall back to the generic attribute-introspection resolver
    return type_object, type_name, pydevd_resolver.defaultResolver
def frameVarsToXML(frame_f_locals):
    """ dumps frame variables to XML
    <var name="var_name" scope="local" type="type" value="value"/>
    """
    names = frame_f_locals.keys()
    # Older interpreters (Python 3.0, Jython 2.1) lack one of the two
    # sorting mechanisms, so support both.
    if hasattr(names, 'sort'):
        names.sort()
    else:
        names = sorted(names)
    parts = []
    for name in names:
        try:
            parts.append(varToXML(frame_f_locals[name], str(name)))
        except Exception:
            traceback.print_exc()
            pydev_log.error("Unexpected error, recovered safely.\n")
    return ''.join(parts)
def varToXML(val, name, doTrim=True):
    """ single variable or dictionary to xml representation

    Returns a `<var name=... type=... value=... />` element string.  If
    `val` is an ExceptionOnEvaluate wrapper, the wrapped result is rendered
    and the element is flagged with isErrorOnEval="True".
    """
    is_exception_on_eval = isinstance(val, ExceptionOnEvaluate)
    if is_exception_on_eval:
        v = val.result
    else:
        v = val
    # NOTE(review): `type` shadows the builtin for the rest of this function.
    type, typeName, resolver = getType(v)
    try:
        if hasattr(v, '__class__'):
            try:
                # Reduce the class repr to a short class name, e.g.
                # "<type 'int'>" -> "int", "a.b.C" -> "C".
                cName = str(v.__class__)
                if cName.find('.') != -1:
                    cName = cName.split('.')[-1]
                elif cName.find("'") != -1: #does not have '.' (could be something like <type 'int'>)
                    cName = cName[cName.index("'") + 1:]
                if cName.endswith("'>"):
                    cName = cName[:-2]
            except:
                cName = str(v.__class__)
            value = '%s: %s' % (cName, v)
        else:
            value = str(v)
    except:
        try:
            value = repr(v)
        except:
            value = 'Unable to get repr for %s' % v.__class__
    try:
        name = quote(name, '/>_= ') #TODO: Fix PY-5834 without using quote
    except:
        pass
    xml = '<var name="%s" type="%s"' % (makeValidXmlValue(name), makeValidXmlValue(typeName))
    if value:
        #cannot be too big... communication may not handle it.
        if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and doTrim:
            value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
            value += '...'
        #fix to work with unicode values
        try:
            if not IS_PY3K:
                if isinstance(value, unicode):
                    value = value.encode('utf-8')
            else:
                # NOTE(review): `bytes.encode` does not exist on Python 3 --
                # this branch looks like it should be `value.decode('utf-8')`
                # and would currently raise AttributeError if value were
                # bytes; confirm before changing, since `value` is normally
                # a str here.
                if isinstance(value, bytes):
                    value = value.encode('utf-8')
        except TypeError: #in java, unicode is a function
            pass
        xmlValue = ' value="%s"' % (makeValidXmlValue(quote(value, '/>_= ')))
    else:
        xmlValue = ''
    if is_exception_on_eval:
        xmlCont = ' isErrorOnEval="True"'
    else:
        if resolver is not None:
            xmlCont = ' isContainer="True"'
        else:
            xmlCont = ''
    return ''.join((xml, xmlValue, xmlCont, ' />\n'))
| {
"content_hash": "a908dc567e10f969c607d5c1f6587492",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 120,
"avg_line_length": 30.08133971291866,
"alnum_prop": 0.5582948942261811,
"repo_name": "IllusionRom-deprecated/android_platform_tools_idea",
"id": "73258e6fcc655cc20b08ec886b800ffee12a642f",
"size": "6287",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/pydevd_xml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "177802"
},
{
"name": "C#",
"bytes": "390"
},
{
"name": "C++",
"bytes": "78894"
},
{
"name": "CSS",
"bytes": "102018"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "1906667"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "128322265"
},
{
"name": "JavaScript",
"bytes": "123045"
},
{
"name": "Objective-C",
"bytes": "22558"
},
{
"name": "Perl",
"bytes": "6549"
},
{
"name": "Python",
"bytes": "17760420"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Shell",
"bytes": "76554"
},
{
"name": "TeX",
"bytes": "60798"
},
{
"name": "XSLT",
"bytes": "113531"
}
],
"symlink_target": ""
} |
from helpers.plotter import Plotter
from helpers.metric import Calculator
import matplotlib.pyplot as plt

# Shared locations of the hypothesis/reference files consumed by Calculator.
hypothesis_dir = 'metrics/hypothesis'
reference_dir = 'metrics/reference'
# Number of references per hypothesis handed to every Calculator instance
# (the original script passed the literal 3 everywhere).
NUM_REFERENCES = 3


def _evaluate_result_files(result_files):
    """Evaluate each result file and collect its metric curves.

    Parameters
    ----------
    result_files : list of str
        Paths to the CSV result files to score.

    Returns
    -------
    tuple
        ``(bleu_1, bleu_2, bleu_3, bleu_4, rouge, steps)`` where each
        metric entry is a list with one element per result file, and
        ``steps`` is taken from the last evaluated file (matching the
        original script, which read the steps of the final calculator).
    """
    bleu_1, bleu_2, bleu_3, bleu_4, rouge = [], [], [], [], []
    steps = None
    for result_file in result_files:
        calculator = Calculator(NUM_REFERENCES, hypothesis_dir, reference_dir)
        calculator.load_result(result_file)
        calculator.evaluate_all_ref_hyp_pairs()
        b1, b2, b3, b4, rg = calculator.get_all_metrics()
        bleu_1.append(b1)
        bleu_2.append(b2)
        bleu_3.append(b3)
        bleu_4.append(b4)
        rouge.append(rg)
        steps = calculator.get_steps()
    return bleu_1, bleu_2, bleu_3, bleu_4, rouge, steps


def _plot_comparison(result_files, descriptions):
    """Evaluate ``result_files`` and build one comparison figure.

    ``descriptions`` supplies the legend label for each result file, in
    the same order.  ``plt.show()`` at the bottom of the script displays
    everything at the end, unchanged from the original flow.
    """
    bleu_1, bleu_2, bleu_3, bleu_4, rouge, steps = \
        _evaluate_result_files(result_files)
    plotter = Plotter()
    plotter.set_metrics(bleu_1, bleu_2, bleu_3, bleu_4, rouge)
    plotter.set_file_description(descriptions)
    plotter.set_steps(steps)
    plotter.plot_all_metrics()


############## ALL GRU PLOTS ############################
_plot_comparison(
    ['result/simple/gru/no_attention.csv',
     'result/bidirectional/gru/no_attention.csv',
     'result/stacked_simple/gru/no_attention.csv',
     'result/stacked_bidirectional/gru/no_attention.csv'],
    ['gru_smpl', 'gru_bidr', 'gru_stack_smpl', 'gru_stack_bidr'])

########## ALL LSTM PLOTS ####################
_plot_comparison(
    ['result/simple/lstm/no_attention.csv',
     'result/bidirectional/lstm/no_attention.csv',
     'result/stacked_simple/lstm/no_attention.csv',
     'result/stacked_bidirectional/lstm/no_attention.csv'],
    ['lstm_smpl', 'lstm_bidr', 'lstm_stack_smpl', 'lstm_stack_bidr'])

#### GRU and LSTM Comparison plots #####
## SIMPLE
_plot_comparison(
    ['result/simple/gru/no_attention.csv',
     'result/simple/lstm/no_attention.csv'],
    ['gru_simple', 'lstm_simple'])

## BIDIRECTIONAL
_plot_comparison(
    ['result/bidirectional/gru/no_attention.csv',
     'result/bidirectional/lstm/no_attention.csv'],
    ['gru_bidir', 'lstm_bidir'])

## STACKED_SIMPLE
_plot_comparison(
    ['result/stacked_simple/gru/no_attention.csv',
     'result/stacked_simple/lstm/no_attention.csv'],
    ['gru_stacked', 'lstm_stacked'])

## STACKED BIDIRECTIONAL
_plot_comparison(
    ['result/stacked_bidirectional/gru/no_attention.csv',
     'result/stacked_bidirectional/lstm/no_attention.csv'],
    ['gru_stack_bidir', 'lstm_stack_bidir'])

# SHOW ALL PLOTS
plt.show()
| {
"content_hash": "aa646cd37b5580927c7a269bbfea576c",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 87,
"avg_line_length": 29.881967213114756,
"alnum_prop": 0.7454465657230634,
"repo_name": "harpribot/deep-summarization",
"id": "80a3e6ecf18950961d535d2aa49b10fc8338fbad",
"size": "9114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluation_plot_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131594"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
import numpy as np
from itertools import combinations
from functools import partial
from sklearn.utils import column_or_1d, check_consistent_length, check_array
from sklearn.preprocessing import label_binarize
from sklearn.metrics import auc, roc_curve
from sklearn.utils.multiclass import type_of_target
def _encode_check_unknown(values, uniques, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
uniques : array
Allowed uniques values.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `uniques` (the
unknown values).
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
if values.dtype == object:
uniques_set = set(uniques)
diff = list(set(values) - uniques_set)
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
else:
unique_values = np.unique(values)
diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True))
if return_mask:
if diff:
valid_mask = np.in1d(values, uniques)
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
def _encode_numpy(values, uniques=None, encode=False, check_unknown=True):
# only used in _encode below, see docstring there for details
if uniques is None:
if encode:
uniques, encoded = np.unique(values, return_inverse=True)
return uniques, encoded
else:
# unique sorts
return np.unique(values)
if encode:
if check_unknown:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError("y contains previously unseen labels: %s"
.format(str(diff)))
encoded = np.searchsorted(uniques, values)
return uniques, encoded
else:
return uniques
def _encode_python(values, uniques=None, encode=False):
# only used in _encode below, see docstring there for details
if uniques is None:
uniques = sorted(set(values))
uniques = np.array(uniques, dtype=values.dtype)
if encode:
table = {val: i for i, val in enumerate(uniques)}
try:
encoded = np.array([table[v] for v in values])
except KeyError as e:
raise ValueError("y contains previously unseen labels: %s"
.format(str(e)))
return uniques, encoded
else:
return uniques
def _encode(values, uniques=None, encode=False, check_unknown=True):
    """Factorize (find uniques) and optionally encode ``values``.

    Object-dtype input is handled by the pure-Python helper, everything
    else by the numpy helper.  For non-object dtypes, a user-supplied
    ``uniques`` array must already be sorted; this is assumed rather than
    checked, and callers must guarantee it.

    Parameters
    ----------
    values : array
        Values to factorize or encode.
    uniques : array, optional
        If given, these uniques are used instead of being derived from
        ``values`` (e.g. user-specified categories, or uniques already
        computed during fit).
    encode : bool, default False
        If True, also return integer codes for ``values`` based on
        ``uniques``.
    check_unknown : bool, default True
        If True, raise when ``values`` contains entries absent from
        ``uniques``.  Ignored (effectively always True) for object dtype;
        useful for _BaseEncoder._transform() to avoid running
        _encode_check_unknown() twice.

    Returns
    -------
    uniques
        When ``encode=False``; sorted if derived from the data.
    (uniques, encoded)
        When ``encode=True``.
    """
    if values.dtype != object:
        return _encode_numpy(values, uniques, encode,
                             check_unknown=check_unknown)
    try:
        return _encode_python(values, uniques, encode)
    except TypeError:
        # Mixed str/number input: report the offending types.
        types = sorted(t.__qualname__
                       for t in set(type(v) for v in values))
        raise TypeError("Encoders require their input to be uniformly " +
                        "strings or numbers. Got "+" ".join(types))
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None,
                  max_fpr=None, multi_class="raise", labels=None):
    """Compute Area Under the Receiver Operating Characteristic Curve
    (ROC AUC) from prediction scores.

    Supports binary, multiclass and multilabel targets, with some
    restrictions (see the parameters below).

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
        True labels (binary/multiclass) or binary label indicators
        (multilabel).
    y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
        Target scores.  Binary: scores of the class with the greater
        label, shape (n_samples,).  Multiclass: per-class probability
        estimates that sum to 1 over classes, whose columns follow
        ``labels`` if provided, else the sorted labels of ``y_true``.
        Multilabel: probability estimates or decision values per class.
    average : {'micro', 'macro', 'samples', 'weighted'} or None, \
            default='macro'
        Averaging strategy over classes; ``None`` returns per-class
        scores.  Multiclass input supports only 'macro' and 'weighted'.
        Ignored when ``y_true`` is binary.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    max_fpr : float > 0 and <= 1, default=None
        If set, the standardized partial AUC over [0, max_fpr] is
        returned.  Must be ``None`` or ``1.0`` for multiclass input.
    multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
        Multiclass configuration; must be set explicitly ('ovr' or 'ovo')
        for multiclass input, otherwise an error is raised.
    labels : array-like of shape (n_classes,), default=None
        Multiclass only.  Labels indexing the columns of ``y_score``.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    y_type = type_of_target(y_true)
    y_true = check_array(y_true, ensure_2d=False, dtype=None)
    y_score = check_array(y_score, ensure_2d=False)

    # A binary target scored with more than two columns is treated as
    # multiclass as well.
    multiclass_input = y_type == "multiclass" or (
        y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2)
    if multiclass_input:
        # Partial AUC computation is not supported for multiclass input.
        if max_fpr is not None and max_fpr != 1.:
            raise ValueError("Partial AUC computation not available in "
                             "multiclass setting, 'max_fpr' must be"
                             " set to `None`, received `max_fpr={0}` "
                             "instead".format(max_fpr))
        if multi_class == 'raise':
            raise ValueError("multi_class must be in ('ovo', 'ovr')")
        return _multiclass_roc_auc_score(y_true, y_score, labels,
                                         multi_class, average, sample_weight)

    binary_metric = partial(_binary_roc_auc_score, max_fpr=max_fpr)
    if y_type == "binary":
        # Collapse the two labels into a single 0/1 indicator column.
        labels = np.unique(y_true)
        y_true = label_binarize(y_true, classes=labels)[:, 0]
    # Binary targets pass straight through; multilabel-indicator targets
    # are scored column by column and averaged.
    return _average_binary_score(binary_metric, y_true, y_score, average,
                                 sample_weight=sample_weight)
def _average_binary_score(binary_metric, y_true, y_score, average,
                          sample_weight=None):
    """Average a binary metric for multilabel classification
    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:
        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
            Will be ignored when ``y_true`` is binary.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.
    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.
    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options:
        raise ValueError('average has to be one of {0}'
                         ''.format(average_options))
    y_type = type_of_target(y_true)
    if y_type not in ("binary", "multilabel-indicator"):
        raise ValueError("{0} format is not supported".format(y_type))
    if y_type == "binary":
        # Binary targets need no per-class loop or averaging.
        return binary_metric(y_true, y_score, sample_weight=sample_weight)
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    not_average_axis = 1
    score_weight = sample_weight
    average_weight = None
    if average == "micro":
        # Flatten: every (sample, class) cell becomes one binary decision.
        if score_weight is not None:
            score_weight = np.repeat(score_weight, y_true.shape[1])
        y_true = y_true.ravel()
        y_score = y_score.ravel()
    elif average == 'weighted':
        # Per-class weight = (possibly sample-weighted) positive support.
        if score_weight is not None:
            average_weight = np.sum(np.multiply(
                y_true, np.reshape(score_weight, (-1, 1))), axis=0)
        else:
            average_weight = np.sum(y_true, axis=0)
        if np.isclose(average_weight.sum(), 0.0):
            return 0
    elif average == 'samples':
        # swap average_weight <-> score_weight: score per row, then average
        # rows weighted by the original sample weights.
        average_weight = score_weight
        score_weight = None
        not_average_axis = 0
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_score.ndim == 1:
        y_score = y_score.reshape((-1, 1))
    n_classes = y_score.shape[not_average_axis]
    score = np.zeros((n_classes,))
    for c in range(n_classes):
        y_true_c = y_true.take([c], axis=not_average_axis).ravel()
        y_score_c = y_score.take([c], axis=not_average_axis).ravel()
        score[c] = binary_metric(y_true_c, y_score_c,
                                 sample_weight=score_weight)
    # Average the results
    if average is not None:
        if average_weight is not None:
            # Scores with 0 weights are forced to be 0, preventing the average
            # score from being affected by 0-weighted NaN elements.
            average_weight = np.asarray(average_weight)
            score[average_weight == 0] = 0
        # BUG FIX: previously the unweighted path ('macro'/'micro') returned
        # the raw per-class array and `average=None` fell off the function
        # (implicit None).  Averaging must happen whenever `average` is set
        # (weights=None gives the unweighted mean), and the raw per-class
        # scores are returned only for average=None, matching the docstring
        # and scikit-learn's reference implementation.
        return np.average(score, weights=average_weight)
    else:
        return score
def _average_multiclass_ovo_score(binary_metric, y_true, y_score,
                                  average='macro'):
    """Average pairwise one-vs-one binary scores (Hand & Till, 2001).

    For every unordered pair of classes (a, b), the samples belonging to
    either class are selected and ``binary_metric`` is evaluated twice --
    once with a as the positive class and once with b -- and the two
    scores are averaged.  The resulting pair scores are then combined
    with an unweighted mean (``average='macro'``) or weighted by the
    fraction of samples covered by each pair (``average='weighted'``).

    Parameters
    ----------
    binary_metric : callable
        ``binary_metric(y_true_target, y_score_target)`` evaluated on one
        pair of classes at a time, where ``y_true_target`` marks the
        designated positive class and ``y_score_target`` holds its scores.
    y_true : array-like of shape (n_samples,)
        True multiclass labels; the label values are used to index the
        columns of ``y_score``.
    y_score : array-like of shape (n_samples, n_classes)
        Per-class probability estimates.
    average : {'macro', 'weighted'}, default='macro'
        How the pairwise scores are combined.

    Returns
    -------
    score : float
        Average of the pairwise binary metric scores.
    """
    check_consistent_length(y_true, y_score)
    classes = np.unique(y_true)
    n_classes = classes.shape[0]
    n_pairs = n_classes * (n_classes - 1) // 2
    pair_scores = np.empty(n_pairs)
    use_prevalence = average == "weighted"
    prevalence = np.empty(n_pairs) if use_prevalence else None
    for pair_index, (pos, neg) in enumerate(combinations(classes, 2)):
        pos_mask = y_true == pos
        neg_mask = y_true == neg
        pair_mask = np.logical_or(pos_mask, neg_mask)
        if use_prevalence:
            # Weight each pair by the fraction of samples it covers.
            prevalence[pair_index] = np.average(pair_mask)
        # Score the pair twice, swapping which class counts as positive.
        pos_score = binary_metric(pos_mask[pair_mask],
                                  y_score[pair_mask, pos])
        neg_score = binary_metric(neg_mask[pair_mask],
                                  y_score[pair_mask, neg])
        pair_scores[pair_index] = (pos_score + neg_score) / 2
    return np.average(pair_scores, weights=prevalence)
def _multiclass_roc_auc_score(y_true, y_score, labels,
                              multi_class, average, sample_weight):
    """Multiclass roc auc score
    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True multiclass labels.
    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class
    labels : array, shape = [n_classes] or None, optional (default=None)
        List of labels to index ``y_score`` used for multiclass. If ``None``,
        the lexical order of ``y_true`` is used to index ``y_score``.
    multi_class : string, 'ovr' or 'ovo'
        Determines the type of multiclass configuration to use.
        ``'ovr'``:
            Calculate metrics for the multiclass case using the one-vs-rest
            approach.
        ``'ovo'``:
            Calculate metrics for the multiclass case using the one-vs-one
            approach.
    average : 'macro' or 'weighted', optional (default='macro')
        Determines the type of averaging performed on the pairwise binary
        metric scores
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    """
    # validation of the input y_score
    if not np.allclose(1, y_score.sum(axis=1)):
        raise ValueError(
            "Target scores need to be probabilities for multiclass "
            "roc_auc, i.e. they should sum up to 1.0 over classes")
    # validation for multiclass parameter specifications
    average_options = ("macro", "weighted")
    if average not in average_options:
        raise ValueError("average must be one of {0} for "
                         "multiclass problems".format(average_options))
    multiclass_options = ("ovo", "ovr")
    if multi_class not in multiclass_options:
        raise ValueError("multi_class='{0}' is not supported "
                         "for multiclass ROC AUC, multi_class must be "
                         "in {1}".format(
                             multi_class, multiclass_options))
    if labels is not None:
        labels = column_or_1d(labels)
        # _encode returns the sorted, deduplicated labels, so the next two
        # checks verify that 'labels' is unique and already ordered.
        classes = _encode(labels)
        if len(classes) != len(labels):
            raise ValueError("Parameter 'labels' must be unique")
        if not np.array_equal(classes, labels):
            raise ValueError("Parameter 'labels' must be ordered")
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of given labels, {0}, not equal to the number "
                "of columns in 'y_score', {1}".format(
                    len(classes), y_score.shape[1]))
        if len(np.setdiff1d(y_true, classes)):
            raise ValueError(
                "'y_true' contains labels not in parameter 'labels'")
    else:
        # Derive the class set (sorted) from y_true itself.
        classes = _encode(y_true)
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of classes in y_true not equal to the number of "
                "columns in 'y_score'")
    if multi_class == "ovo":
        if sample_weight is not None:
            raise ValueError("sample_weight is not supported "
                             "for multiclass one-vs-one ROC AUC, "
                             "'sample_weight' must be None in this case.")
        # Encode labels to 0..n_classes-1 so they index y_score columns.
        _, y_true_encoded = _encode(y_true, uniques=classes, encode=True)
        # Hand & Till (2001) implementation (ovo)
        return _average_multiclass_ovo_score(_binary_roc_auc_score,
                                             y_true_encoded,
                                             y_score, average=average)
    else:
        # ovr is same as multi-label
        y_true_multilabel = label_binarize(y_true, classes=classes)
        return _average_binary_score(_binary_roc_auc_score, y_true_multilabel,
                                     y_score, average,
                                     sample_weight=sample_weight)
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
"""Binary roc auc score"""
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, _ = roc_curve(y_true, y_score,
sample_weight=sample_weight)
if max_fpr is None or max_fpr == 1:
return auc(fpr, tpr)
if max_fpr <= 0 or max_fpr > 1:
raise ValueError("Expected max_fpr in range (0, 1], got: %r".format(max_fpr))
# Add a single point at max_fpr by linear interpolation
stop = np.searchsorted(fpr, max_fpr, 'right')
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
partial_auc = auc(fpr, tpr)
# McClish correction: standardize result to be 0.5 if non-discriminant
# and 1 if maximal
min_area = 0.5 * max_fpr**2
max_area = max_fpr
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
| {
"content_hash": "3f9b9bf7b676665c4bfbb8db45981a31",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 85,
"avg_line_length": 42.473958333333336,
"alnum_prop": 0.600531371346822,
"repo_name": "michalkurka/h2o-3",
"id": "41e120d6221a92ee051ee250be6a10a78d1e275c",
"size": "24995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/pyunit_utils/sklearn_multinomial_auc_method.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
} |
def get_condition(args):
    """Build a condition object from a spec sequence.

    ``args[0]`` selects the condition class via COND_MAP; the remaining
    entries are forwarded to that class's constructor.
    """
    condition_type = args[0]
    return COND_MAP[condition_type](*args[1:])
class ValueCondition:
    """Condition that holds while an agent property lies strictly between
    ``min`` and ``max`` (both bounds exclusive).

    String bounds are coerced to float so conditions can be built directly
    from textual specs (see get_condition).
    """

    __slots__ = ["property", "min", "max"]

    def __init__(self, prop, _min, _max):
        # Name of the agent attribute inspected by test().
        self.property = prop
        # BUG FIX/idiom: isinstance() instead of type() == str, which also
        # accepts str subclasses.
        if isinstance(_min, str):
            _min = float(_min)
        if isinstance(_max, str):
            _max = float(_max)
        self.min = _min
        self.max = _max

    def test(self, agent):
        """Return True when the agent's property is strictly inside (min, max)."""
        return self.min < getattr(agent, self.property) < self.max
# Registry mapping condition type tags to condition classes; get_condition()
# looks up args[0] here to pick the class to instantiate.
COND_MAP = {
    "VALUE" : ValueCondition,
}
| {
"content_hash": "35f642774be964dbaece543c00e5f71c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 18.75,
"alnum_prop": 0.5977777777777777,
"repo_name": "Moguri/ullur",
"id": "dae24efdee633a373612ed39b3ab5986ad4b0e7d",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/ai/decision_strategies/conditions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "F#",
"bytes": "524"
},
{
"name": "Python",
"bytes": "133542"
},
{
"name": "Shell",
"bytes": "5094"
}
],
"symlink_target": ""
} |
"""Pivotal tracker issue management."""
from twisted.internet import defer
from twisted.python import log
import treq
import json
class PivotalTrackerIssues(object):
    """Pivotal tracker issues management.

    Thin asynchronous client for the Pivotal Tracker v5 REST API.  All
    request methods return twisted Deferreds that fire with the
    JSON-decoded response (or the extracted resource id, see
    handle_response).
    """
    def __init__(self, options):
        """Set up API basics."""
        self._options = options
        # SECURITY NOTE(review): the API token and project id are hard-coded
        # in source; they should be loaded from configuration/secrets and
        # this token should be rotated.
        self.token = "e8d2ec75253d7f30dba16d3e463448aa"
        self.project_id = "1120728"
        self.api_base =\
            "https://www.pivotaltracker.com/services/v5/projects/{}/"\
            .format(self.project_id)
    def treq_get(self, url):
        """Wrap get request to api.

        Returns a Deferred firing with the JSON-decoded response body.
        """
        return treq.get(
            url, headers={"X-TrackerToken": self.token}
        ).addCallback(lambda resp: resp.json())
    def treq_send(self, method, url, data):
        """Wrap send put/post to api.

        ``method`` is the treq function name ("put" or "post"); ``data``
        is JSON-serialized before sending.  Returns a Deferred firing
        with the JSON-decoded response body.
        """
        json_data = json.dumps(data)
        return getattr(treq, method)(url, json_data, headers={
            "X-TrackerToken": self.token,
            "Content-Type": "application/json"
        }).addCallback(lambda resp: resp.json())
    def get_comment(self, issue_id, comment_id):
        """Get a comment from PT."""
        url = self.api_base + "stories/{}/comments/{}"\
            .format(issue_id, comment_id)
        return self.treq_get(url)
    def create_comment(self, issue_id, notes):
        """Create a comment on PT.

        The returned Deferred fires with the new comment's id.
        """
        url = self.api_base + "stories/{}/comments".format(issue_id)
        return self.treq_send("post", url, {"text": notes}).addCallback(
            self.handle_response)
    def handle_response(self, data):
        """Extract the resource id from a decoded API response."""
        return data['id']
    @defer.inlineCallbacks
    def finish_comment(self, issue_id, comment_id, notes):
        """Finish up the comment on PT.

        Fetches the existing comment text and appends ``notes`` to it,
        then PUTs the updated text back.  Fires with the comment id.
        """
        log.msg("Sending finish comment: issue: {}, comment: {}".format(
            issue_id, comment_id))
        comment = yield self.get_comment(issue_id, comment_id)
        text = comment['text']
        text += "\n\n" + notes
        url = self.api_base + "stories/{}/comments/{}"\
            .format(issue_id, comment_id)
        response = yield self.treq_send("put", url, {"text": text})
        defer.returnValue(self.handle_response(response))
    def set_issue_started(self, issue_id):
        """Set the issue as started."""
        url = self.api_base + "stories/{}"\
            .format(issue_id)
        data = {"current_state": "started"}
        return self.treq_send("put", url, data).addCallback(
            self.handle_response)
"content_hash": "26eac98560d3a8710809f1510e2f28a1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 34.94444444444444,
"alnum_prop": 0.5858505564387917,
"repo_name": "dpnova/devdaemon",
"id": "864de316e7813400bf0486f70d31c96829c8a614",
"size": "2516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devdaemon/issues/pivotaltracker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16631"
}
],
"symlink_target": ""
} |
import random
from datetime import datetime, timedelta
import pytest
import pytz
import maya
from maya.compat import cmp
# Timezone fixtures for zone-aware tests.
# NOTE(review): none of these appear to be referenced in this module —
# candidates for removal (verify against the full file before deleting).
Los_Angeles = pytz.timezone("America/Los_Angeles")
New_York = pytz.timezone("America/New_York")
Melbourne = pytz.timezone("Australia/Melbourne")
def test_interval_requires_2_of_start_end_duration():
    # MayaInterval must be given exactly two of start/end/duration:
    # one alone is under-specified and all three is over-specified.
    start = maya.now()
    end = start.add(hours=1)
    with pytest.raises(ValueError):
        maya.MayaInterval(start=start)
    with pytest.raises(ValueError):
        maya.MayaInterval(end=end)
    with pytest.raises(ValueError):
        maya.MayaInterval(duration=60)
    with pytest.raises(ValueError):
        maya.MayaInterval(start=start, end=end, duration=60)
    # Any pair of the three is accepted.
    maya.MayaInterval(start=start, end=end)
    maya.MayaInterval(start=start, duration=60)
    maya.MayaInterval(end=end, duration=60)
def test_interval_requires_end_time_after_or_on_start_time():
    """An interval's end must not precede its start (duration must be > 0)."""
    with pytest.raises(ValueError):
        maya.MayaInterval(start=maya.now(), duration=0)
    # BUG FIX: this call previously sat OUTSIDE the pytest.raises block, so
    # a negative duration was never actually asserted to raise (and would
    # have errored the test if it did). Wrap it like the zero-duration case.
    with pytest.raises(ValueError):
        maya.MayaInterval(start=maya.now(), duration=-1)
def test_interval_init_start_end():
    # Constructing from an explicit start/end pair stores both verbatim.
    start = maya.now()
    end = start.add(hours=1)
    interval = maya.MayaInterval(start=start, end=end)
    assert interval.start == start
    assert interval.end == end


def test_interval_init_start_duration():
    # End is derived as start + duration (duration given in seconds).
    start = maya.now()
    duration = 1
    interval = maya.MayaInterval(start=start, duration=duration)
    assert interval.start == start
    assert interval.end == start.add(seconds=duration)


def test_interval_init_end_duration():
    # Start is derived as end - duration (duration given in seconds).
    end = maya.now()
    duration = 1
    interval = maya.MayaInterval(end=end, duration=duration)
    assert interval.end == end
    assert interval.start == end.subtract(seconds=duration)
@pytest.mark.parametrize(
"start_doy1,end_doy1,start_doy2,end_doy2,intersection_doys",
(
(0, 2, 1, 3, (1, 2)),
(0, 2, 3, 4, None),
(0, 2, 2, 3, None),
(0, 1, 0, 1, (0, 1)),
(1, 1, 1, 3, (1, 1)),
(1, 1, 1, 1, (1, 1)),
(1, 1, 2, 3, None),
(2, 2, 1, 3, (2, 2)),
(1, 3, 1, 1, (1, 1)),
(2, 3, 1, 1, None),
(1, 3, 2, 2, (2, 2)),
),
ids=(
"overlapping",
"non-overlapping",
"adjacent",
"equal",
"instant overlapping start only",
"instant equal",
"instant disjoint",
"instant overlapping",
"instant overlapping start only (left)",
"instant disjoint (left)",
"instant overlapping (left)",
),
)
def test_interval_intersection(
start_doy1, end_doy1, start_doy2, end_doy2, intersection_doys
):
base = maya.MayaDT.from_datetime(datetime(2016, 1, 1))
interval1 = maya.MayaInterval(base.add(days=start_doy1), base.add(days=end_doy1))
interval2 = maya.MayaInterval(base.add(days=start_doy2), base.add(days=end_doy2))
if intersection_doys:
start_doy_intersection, end_doy_intersection = intersection_doys
assert interval1 & interval2 == maya.MayaInterval(
base.add(days=start_doy_intersection), base.add(days=end_doy_intersection)
)
else:
assert (interval1 & interval2) is None
# check invalid argument
with pytest.raises(TypeError):
interval1 & "invalid type"
def test_interval_intersects():
base = maya.MayaDT.from_datetime(datetime(2016, 1, 1))
interval = maya.MayaInterval(base, base.add(days=1))
assert interval.intersects(interval)
assert not interval.intersects(
maya.MayaInterval(base.add(days=2), base.add(days=3))
)
# check invalid argument
with pytest.raises(TypeError):
interval.intersects("invalid type")
def test_and_operator():
base = maya.MayaDT.from_datetime(datetime(2016, 1, 1))
interval1 = maya.MayaInterval(base, base.add(days=2))
interval2 = maya.MayaInterval(base.add(days=1), base.add(days=3))
assert (
interval1 & interval2
== interval2 & interval1 # noqa
== interval1.intersection(interval2) # noqa
)
# check invalid argument
with pytest.raises(TypeError):
interval1.intersection("invalid type")
def test_interval_eq_operator():
start = maya.now()
end = start.add(hours=1)
interval = maya.MayaInterval(start=start, end=end)
assert interval == maya.MayaInterval(start=start, end=end)
assert interval != maya.MayaInterval(start=start, end=end.add(days=1))
# check invalid argument
with pytest.raises(TypeError):
interval == "invalid type"
with pytest.raises(TypeError):
interval != "invalid type"
def test_interval_timedelta():
    # .timedelta exposes the interval's span as a datetime.timedelta.
    start = maya.now()
    delta = timedelta(hours=1)
    interval = maya.MayaInterval(start=start, duration=delta)
    assert interval.timedelta == delta


def test_interval_duration():
    # .duration exposes the same span as a number of seconds.
    start = maya.now()
    delta = timedelta(hours=1)
    interval = maya.MayaInterval(start=start, duration=delta)
    assert interval.duration == delta.total_seconds()
@pytest.mark.parametrize(
"start_doy1,end_doy1,start_doy2,end_doy2,expected",
(
(0, 2, 1, 3, False),
(0, 2, 3, 4, False),
(0, 2, 2, 3, False),
(0, 1, 0, 1, True),
(0, 3, 1, 2, True),
),
ids=("overlapping", "non-overlapping", "adjacent", "equal", "subset"),
)
def test_interval_contains(start_doy1, end_doy1, start_doy2, end_doy2, expected):
base = maya.MayaDT.from_datetime(datetime(2016, 1, 1))
interval1 = maya.MayaInterval(base.add(days=start_doy1), base.add(days=end_doy1))
interval2 = maya.MayaInterval(base.add(days=start_doy2), base.add(days=end_doy2))
assert interval1.contains(interval2) is expected
assert (interval2 in interval1) is expected
# check invalid argument
with pytest.raises(TypeError):
interval1.contains("invalid type")
@pytest.mark.parametrize(
"start_doy,end_doy,dt_doy,expected",
(
(2, 4, 1, False),
(2, 4, 2, True),
(2, 4, 3, True),
(2, 4, 4, False),
(2, 4, 5, False),
),
ids=("before-start", "on-start", "during", "on-end", "after-end"),
)
def test_interval_in_operator_maya_dt(start_doy, end_doy, dt_doy, expected):
base = maya.MayaDT.from_datetime(datetime(2016, 1, 1))
interval = maya.MayaInterval(
start=base.add(days=start_doy), end=base.add(days=end_doy)
)
dt = base.add(days=dt_doy)
assert (dt in interval) is expected
# check invalid argument
with pytest.raises(TypeError):
"invalid type" in interval
def test_interval_hash():
start = maya.now()
end = start.add(hours=1)
interval = maya.MayaInterval(start=start, end=end)
assert hash(interval) == hash(maya.MayaInterval(start=start, end=end))
assert hash(interval) != hash(maya.MayaInterval(start=start, end=end.add(days=1)))
def test_interval_iter():
start = maya.now()
end = start.add(days=1)
assert tuple(maya.MayaInterval(start=start, end=end)) == (start, end)
@pytest.mark.parametrize(
"start1,end1,start2,end2,expected",
[(1, 2, 1, 2, 0), (1, 3, 2, 4, -1), (2, 4, 1, 3, 1), (1, 2, 1, 3, -1)],
ids=("equal", "less-than", "greater-than", "use-end-time-if-start-time-identical"),
)
def test_interval_cmp(start1, end1, start2, end2, expected):
base = maya.now()
interval1 = maya.MayaInterval(start=base.add(days=start1), end=base.add(days=end1))
interval2 = maya.MayaInterval(start=base.add(days=start2), end=base.add(days=end2))
assert cmp(interval1, interval2) == expected
# check invalid argument
with pytest.raises(TypeError):
cmp(interval1, "invalid type")
@pytest.mark.parametrize(
"start1,end1,start2,end2,expected",
[
(1, 2, 2, 3, [(1, 3)]),
(1, 3, 2, 4, [(1, 4)]),
(1, 2, 3, 4, [(1, 2), (3, 4)]),
(1, 5, 2, 3, [(1, 5)]),
],
ids=("adjacent", "overlapping", "non-overlapping", "contains"),
)
def test_interval_combine(start1, end1, start2, end2, expected):
base = maya.now()
interval1 = maya.MayaInterval(start=base.add(days=start1), end=base.add(days=end1))
interval2 = maya.MayaInterval(start=base.add(days=start2), end=base.add(days=end2))
expected_intervals = [
maya.MayaInterval(start=base.add(days=start), end=base.add(days=end))
for start, end in expected
]
assert interval1.combine(interval2) == expected_intervals
assert interval2.combine(interval1) == expected_intervals
# check invalid argument
with pytest.raises(TypeError):
interval2.combine("invalid type")
@pytest.mark.parametrize(
"start1,end1,start2,end2,expected",
[
(1, 2, 3, 4, [(1, 2)]),
(1, 2, 2, 4, [(1, 2)]),
(2, 3, 1, 4, []),
(1, 4, 2, 3, [(1, 2), (3, 4)]),
(1, 4, 0, 2, [(2, 4)]),
(1, 4, 3, 5, [(1, 3)]),
(1, 4, 1, 2, [(2, 4)]),
(1, 4, 3, 4, [(1, 3)]),
],
ids=(
"non-overlapping",
"adjacent",
"contains",
"splits",
"overlaps-left",
"overlaps-right",
"overlaps-left-identical-start",
"overlaps-right-identical-end",
),
)
def test_interval_subtract(start1, end1, start2, end2, expected):
base = maya.now()
interval1 = maya.MayaInterval(start=base.add(days=start1), end=base.add(days=end1))
interval2 = maya.MayaInterval(start=base.add(days=start2), end=base.add(days=end2))
expected_intervals = [
maya.MayaInterval(start=base.add(days=start), end=base.add(days=end))
for start, end in expected
]
assert interval1.subtract(interval2) == expected_intervals
# check invalid argument
with pytest.raises(TypeError):
interval1.subtract("invalid type")
@pytest.mark.parametrize(
"start1,end1,start2,end2,expected",
[(1, 2, 2, 3, True), (2, 3, 1, 2, True), (1, 3, 2, 3, False), (2, 3, 4, 5, False)],
ids=("adjacent-right", "adjacent-left", "overlapping", "non-overlapping"),
)
def test_interval_is_adjacent(start1, end1, start2, end2, expected):
base = maya.now()
interval1 = maya.MayaInterval(start=base.add(days=start1), end=base.add(days=end1))
interval2 = maya.MayaInterval(start=base.add(days=start2), end=base.add(days=end2))
assert interval1.is_adjacent(interval2) == expected
# check invalid argument
with pytest.raises(TypeError):
interval1.is_adjacent("invalid type")
@pytest.mark.parametrize(
"start,end,delta,include_remainder,expected",
[
(0, 10, 5, False, [(0, 5), (5, 10)]),
(0, 10, 5, True, [(0, 5), (5, 10)]),
(0, 10, 3, False, [(0, 3), (3, 6), (6, 9)]),
(0, 10, 3, True, [(0, 3), (3, 6), (6, 9), (9, 10)]),
(0, 2, 5, False, []),
(0, 2, 5, True, [(0, 2)]),
],
ids=(
"even-split",
"even-split-include-partial",
"uneven-split-do-not-include-partial",
"uneven-split-include-partial",
"delta-larger-than-timepsan-do-not-include-partial",
"delta-larger-than-timepsan-include-partial",
),
)
def test_interval_split(start, end, delta, include_remainder, expected):
base = maya.now()
interval = maya.MayaInterval(start=base.add(days=start), end=base.add(days=end))
delta = timedelta(days=delta)
expected_intervals = [
maya.MayaInterval(start=base.add(days=s), end=base.add(days=e))
for s, e in expected
]
assert expected_intervals == list(
interval.split(delta, include_remainder=include_remainder)
)
def test_interval_split_non_positive_delta():
start = maya.now()
end = start.add(days=1)
interval = maya.MayaInterval(start=start, end=end)
with pytest.raises(ValueError):
list(interval.split(timedelta(seconds=0)))
with pytest.raises(ValueError):
list(interval.split(timedelta(seconds=-10)))
@pytest.mark.parametrize(
"start,end,minutes,timezone,snap_out,expected_start,expected_end",
[
((5, 12), (8, 48), 30, None, False, (5, 30), (8, 30)),
((5, 12), (8, 48), 30, None, True, (5, 0), (9, 0)),
((5, 15), (9, 0), 15, None, False, (5, 15), (9, 0)),
((5, 15), (9, 0), 15, None, True, (5, 15), (9, 0)),
((6, 50), (9, 15), 60, "America/New_York", False, (7, 0), (9, 0)),
((6, 50), (9, 15), 60, "America/New_York", True, (6, 0), (10, 0)),
((6, 20), (6, 50), 60, None, False, (6, 0), (6, 0)),
((6, 20), (6, 50), 60, None, True, (6, 0), (7, 0)),
((6, 20), (6, 50), 60, "America/Chicago", False, (6, 0), (6, 0)),
((6, 20), (6, 50), 60, "America/Chicago", True, (6, 0), (7, 0)),
],
ids=(
"normal",
"normal-snap_out",
"already-quantized",
"already-quantized-snap_out",
"with-timezone",
"with-timezone-snap_out",
"too-small",
"too-small-snap_out",
"too-small-with-timezone",
"too-small-with-timezone-snap_out",
),
)
def test_quantize(
start, end, minutes, timezone, snap_out, expected_start, expected_end
):
base = maya.MayaDT.from_datetime(datetime(2017, 1, 1))
interval = maya.MayaInterval(
start=base.add(hours=start[0], minutes=start[1]),
end=base.add(hours=end[0], minutes=end[1]),
)
kwargs = {"timezone": timezone} if timezone is not None else {}
quantized_interval = interval.quantize(
timedelta(minutes=minutes), snap_out=snap_out, **kwargs
)
assert quantized_interval == maya.MayaInterval(
start=base.add(hours=expected_start[0], minutes=expected_start[1]),
end=base.add(hours=expected_end[0], minutes=expected_end[1]),
)
def test_quantize_invalid_delta():
start = maya.now()
end = start.add(days=1)
interval = maya.MayaInterval(start=start, end=end)
with pytest.raises(ValueError):
interval.quantize(timedelta(minutes=0))
with pytest.raises(ValueError):
interval.quantize(timedelta(minutes=-1))
def test_interval_flatten_non_overlapping():
step = 2
max_hour = 20
base = maya.now()
intervals = [
maya.MayaInterval(
start=base.add(hours=hour), duration=timedelta(hours=step - 1)
)
for hour in range(0, max_hour, step)
]
random.shuffle(intervals)
assert maya.MayaInterval.flatten(intervals) == sorted(intervals)
def test_interval_flatten_adjacent():
step = 2
max_hour = 20
base = maya.when("jan/1/2011")
intervals = [
maya.MayaInterval(start=base.add(hours=hour), duration=timedelta(hours=step))
for hour in range(0, max_hour, step)
]
random.shuffle(intervals)
assert maya.MayaInterval.flatten(intervals) == [
maya.MayaInterval(start=base, duration=timedelta(hours=max_hour))
]
def test_interval_flatten_intersecting():
step = 2
max_hour = 20
base = maya.now()
intervals = [
maya.MayaInterval(
start=base.add(hours=hour), duration=timedelta(hours=step, minutes=30)
)
for hour in range(0, max_hour, step)
]
random.shuffle(intervals)
assert maya.MayaInterval.flatten(intervals) == [
maya.MayaInterval(start=base, duration=timedelta(hours=max_hour, minutes=30))
]
def test_interval_flatten_containing():
step = 2
max_hour = 20
base = maya.now()
containing_interval = maya.MayaInterval(
start=base, end=base.add(hours=max_hour + step)
)
intervals = [
maya.MayaInterval(
start=base.add(hours=hour), duration=timedelta(hours=step - 1)
)
for hour in range(2, max_hour, step)
]
intervals.append(containing_interval)
random.shuffle(intervals)
assert maya.MayaInterval.flatten(intervals) == [containing_interval]
def test_interval_from_datetime():
start = maya.now()
duration = timedelta(hours=1)
end = start + duration
interval = maya.MayaInterval.from_datetime(
start_dt=start.datetime(naive=False), end_dt=end.datetime(naive=False)
)
assert interval.start == start
assert interval.end == end
interval2 = maya.MayaInterval.from_datetime(
start_dt=start.datetime(naive=False), duration=duration
)
assert interval2.start == start
assert interval2.end == end
interval3 = maya.MayaInterval.from_datetime(
end_dt=end.datetime(naive=False), duration=duration
)
assert interval3.start == start
assert interval3.end == end
def test_interval_iso8601():
start = maya.when("11-17-11 08:09:10")
interval = maya.MayaInterval(start=start, duration=1)
assert interval.iso8601() == "2011-11-17T08:09:10Z/2011-11-17T08:09:11Z"
def test_interval_from_iso8601():
interval = maya.MayaInterval.from_iso8601(
"2018-03-18T14:27:18Z/2018-04-01T04:15:27Z"
)
s = maya.when("2018-03-18T14:27:18Z")
e = maya.when("2018-04-01T04:15:27Z")
assert interval.start == s
assert interval.end == e
def test_interval_from_iso8601_duration():
interval = maya.MayaInterval.from_iso8601("2018-03-18T14:27:18Z/P13DT13H48M9S")
s = maya.when("2018-03-18T14:27:18Z")
e = maya.when("2018-04-01T04:15:27Z")
assert interval.start == s
assert interval.end == e
interval = maya.MayaInterval.from_iso8601("2018-03-05T14:27:18Z/P2W")
s = maya.when("2018-03-05T14:27:18Z")
e = maya.when("2018-03-19T14:27:18Z")
assert interval.start == s
assert interval.end == e
@pytest.mark.parametrize(
"start_string,end_string,interval,expected_count",
[
("2019-01-03 11:40:00Z", "2019-01-03 11:40:20Z", 2, 10),
("2019-01-03 11:40:00Z", "2019-01-03 11:40:30Z", timedelta(seconds=2), 15),
("2019-01-03 11:40:00Z", "2019-01-03 11:45:00Z", 2 * 60, 3),
("2019-01-03 11:40:00Z", "2019-01-03 11:51:00Z", timedelta(minutes=1), 11),
("2019-01-03 11:40:00Z", "2019-01-03 21:40:00Z", 3 * 60 * 60, 4),
("2019-01-03 11:40:00Z", "2019-01-03 13:41:00Z", timedelta(hours=1), 3),
("2019-01-03 11:40:00Z", "2019-01-09 11:40:00Z", 3 * 60 * 60 * 24, 2),
("2019-01-03 11:40:00Z", "2019-01-05 12:00:00Z", timedelta(days=2), 2),
],
ids=(
"seconds",
"seconds-timedelta",
"minutes",
"minutes-timedelta",
"hours",
"hours-timedelta",
"days",
"days-timedelta",
),
)
def test_intervals(start_string, end_string, interval, expected_count):
start = maya.parse(start_string)
end = maya.parse(end_string)
assert len(list(maya.intervals(start, end, interval))) == expected_count
def test_issue_168_regression():
    # Regression test: maya.intervals() previously yielded the same value
    # forever, producing a never-ending generator (issue #168).
    start = maya.now()
    end = start.add(weeks=1)
    gen = maya.intervals(start=start, end=end, interval=60 * 60 * 24)
    # Since the bug causes the generator to never end, first sanity
    # check that two results are not the same.
    assert next(gen) != next(gen)
    assert len(list(maya.intervals(start=start, end=end, interval=60 * 60 * 24))) == 7
| {
"content_hash": "248eec3218e55776e83e6715ce06bf61",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 87,
"avg_line_length": 33.627450980392155,
"alnum_prop": 0.6173866949377154,
"repo_name": "kennethreitz/maya",
"id": "acc02a17160a8fa61a467b30885f636975f98c95",
"size": "18865",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_maya_interval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9775"
},
{
"name": "Makefile",
"bytes": "70"
},
{
"name": "Python",
"bytes": "59879"
}
],
"symlink_target": ""
} |
"""
Controller class for the viewer
"""
import logging
from pylons import config
from pylons.controllers.util import abort, redirect
from turbulenz_local.controllers import BaseController
from turbulenz_local.models.gamelist import get_game_by_slug
LOG = logging.getLogger(__name__)
class ViewerController(BaseController):
    """Redirects a play request for a game asset into the viewer app."""

    # Which viewer build to launch; each falls back to a default when the
    # corresponding 'viewer.*' key is absent from the pylons config.
    viewer_app = config.get('viewer.app', 'viewer')
    viewer_type = config.get('viewer.type', 'canvas')
    viewer_mode = config.get('viewer.mode', 'release')

    @classmethod
    def app(cls, slug, asset):
        """Redirect to the viewer page for *asset* of the game at *slug*.

        Aborts with 404 when no game matches the slug.
        """
        game = get_game_by_slug(slug)
        if not game:
            abort(404, 'Game does not exist: %s' % slug)
        base_url = '/play/' + slug + '/'
        query = '?assetpath=%s&baseurl=%s&mapping_table=%s' % (
            asset, base_url, game.mapping_table)
        target = '/%s#/play/%s/%s.%s.%s.html' % (
            query, cls.viewer_app, cls.viewer_app, cls.viewer_type,
            cls.viewer_mode)
        redirect(target)
| {
"content_hash": "838e0abca51a5ba57674c925be84fbb9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 106,
"avg_line_length": 32.87096774193548,
"alnum_prop": 0.6310107948969578,
"repo_name": "turbulenz/turbulenz_local",
"id": "7ea2c6f10bdaf33e402bb55da234f538d712f5d9",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbulenz_local/controllers/viewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "282"
},
{
"name": "CSS",
"bytes": "29719"
},
{
"name": "HTML",
"bytes": "54841"
},
{
"name": "JavaScript",
"bytes": "200107"
},
{
"name": "Python",
"bytes": "459206"
}
],
"symlink_target": ""
} |
"""
Triggers define what causes a Jenkins job to start building.
**Component**: triggers
:Macro: trigger
:Entry Point: jenkins_jobs.triggers
Example::
job:
name: test_job
triggers:
- timed: '@daily'
"""
import six
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.errors import (InvalidAttributeError,
JenkinsJobsException,
MissingAttributeError)
import logging
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
logger = logging.getLogger(str(__name__))
def gerrit_handle_legacy_configuration(data):
    """Translate deprecated camelCase gerrit-trigger keys in *data*.

    Mutates *data* (and its project dicts) in place: camelCase keys become
    their hyphenated equivalents, and old ``trigger-on-*`` boolean flags are
    folded into the ``trigger-on`` event list. Raises JenkinsJobsException
    when both the old and new event formats are present at once.
    """
    hyphenizer = re.compile("[A-Z]")

    def hyphenize(attr):
        """Convert strings like triggerOn to trigger-on.
        """
        return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(),
                              attr)

    def convert_dict(d, old_keys):
        # Rename each deprecated key in place, warning the user once per key.
        for old_key in old_keys:
            if old_key in d:
                new_key = hyphenize(old_key)
                logger.warn("'%s' is deprecated and will be removed after "
                            "1.0.0, please use '%s' instead", old_key, new_key)
                d[new_key] = d[old_key]
                del d[old_key]

    convert_dict(data, [
        'triggerOnPatchsetUploadedEvent',
        'triggerOnChangeAbandonedEvent',
        'triggerOnChangeMergedEvent',
        'triggerOnChangeRestoredEvent',
        'triggerOnCommentAddedEvent',
        'triggerOnDraftPublishedEvent',
        'triggerOnRefUpdatedEvent',
        'triggerApprovalCategory',
        'triggerApprovalValue',
        'overrideVotes',
        'gerritBuildSuccessfulVerifiedValue',
        'gerritBuildFailedVerifiedValue',
        'failureMessage',
        'skipVote',
    ])
    for project in data['projects']:
        convert_dict(project, [
            'projectCompareType',
            'projectPattern',
            'branchCompareType',
            'branchPattern',
        ])

    # Collect the old boolean flags ('trigger-on-<event>': bool). Note that
    # convert_dict above has already hyphenized any camelCase variants, so
    # this prefix scan catches both spellings.
    old_format_events = OrderedDict(
        (key, should_register) for key, should_register in six.iteritems(data)
        if key.startswith('trigger-on-'))
    trigger_on = data.setdefault('trigger-on', [])

    if old_format_events:
        logger.warn("The events: %s; which you used is/are deprecated. "
                    "Please use 'trigger-on' instead.",
                    ', '.join(old_format_events))

    # Mixing old flags with an explicit trigger-on list is ambiguous.
    if old_format_events and trigger_on:
        raise JenkinsJobsException(
            'Both, the new format (trigger-on) and old format (trigger-on-*) '
            'gerrit events format found. Please use either the new or the old '
            'format of trigger events definition.')

    # Keep only the flags that were set, stripped of the 'trigger-on-' prefix.
    trigger_on.extend(event_name[len('trigger-on-'):]
                      for event_name, should_register
                      in six.iteritems(old_format_events) if should_register)

    # The comment-added event carried its parameters in separate top-level
    # keys under the old format; expand it into the new nested-dict form.
    for idx, event in enumerate(trigger_on):
        if event == 'comment-added-event':
            trigger_on[idx] = events = OrderedDict()
            events['comment-added-event'] = OrderedDict((
                ('approval-category', data['trigger-approval-category']),
                ('approval-value', data['trigger-approval-value'])
            ))
def build_gerrit_triggers(xml_parent, data):
    """Add the <triggerOnEvents> section for the gerrit trigger to *xml_parent*.

    Simple (parameterless) events are emitted from a lookup table; the
    parameterized events (patchset-created, comment-added and
    comment-added-contains) get dedicated sub-elements. Raises
    JenkinsJobsException for an unknown simple event name.
    """
    available_simple_triggers = {
        'change-abandoned-event': 'PluginChangeAbandonedEvent',
        'change-merged-event': 'PluginChangeMergedEvent',
        'change-restored-event': 'PluginChangeRestoredEvent',
        'draft-published-event': 'PluginDraftPublishedEvent',
        'patchset-uploaded-event': 'PluginPatchsetCreatedEvent',
        'patchset-created-event': 'PluginPatchsetCreatedEvent',
        'ref-updated-event': 'PluginRefUpdatedEvent',
    }
    tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \
        'hudsontrigger.events'

    trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents')

    for event in data.get('trigger-on', []):
        if isinstance(event, six.string_types):
            # String entries are simple, parameterless events.
            tag_name = available_simple_triggers.get(event)
            if event == 'patchset-uploaded-event':
                logger.warn("'%s' is deprecated. Use 'patchset-created-event' "
                            "format instead.", event)

            if not tag_name:
                # BUG FIX: dict.keys() is a view on Python 3 and cannot be
                # concatenated to a list with '+'; build a real list first.
                known = ', '.join(list(available_simple_triggers)
                                  + ['comment-added-event',
                                     'comment-added-contains-event'])
                msg = ("The event '%s' under 'trigger-on' is not one of the "
                       "known: %s.") % (event, known)
                raise JenkinsJobsException(msg)
            XML.SubElement(trigger_on_events,
                           '%s.%s' % (tag_namespace, tag_name))
        else:
            # Dict entries carry per-event parameters.
            if 'patchset-created-event' in event.keys():
                pce = event['patchset-created-event']
                pc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent'))
                XML.SubElement(pc, 'excludeDrafts').text = str(
                    pce.get('exclude-drafts', False)).lower()
                XML.SubElement(pc, 'excludeTrivialRebase').text = str(
                    pce.get('exclude-trivial-rebase', False)).lower()
                XML.SubElement(pc, 'excludeNoCodeChange').text = str(
                    pce.get('exclude-no-code-change', False)).lower()
            if 'comment-added-event' in event.keys():
                comment_added_event = event['comment-added-event']
                cadded = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginCommentAddedEvent'))
                XML.SubElement(cadded, 'verdictCategory').text = \
                    comment_added_event['approval-category']
                XML.SubElement(
                    cadded,
                    'commentAddedTriggerApprovalValue').text = \
                    str(comment_added_event['approval-value'])
            if 'comment-added-contains-event' in event.keys():
                comment_added_event = event['comment-added-contains-event']
                caddedc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace,
                               'PluginCommentAddedContainsEvent'))
                XML.SubElement(caddedc, 'commentAddedCommentContains').text = \
                    comment_added_event['comment-contains-value']
def build_gerrit_skip_votes(xml_parent, data):
    """Append the <skipVote> section for the gerrit trigger to *xml_parent*.

    For each build outcome, the corresponding child element text is
    'true' when the job's 'skip-vote' mapping enables it, else 'false'.
    """
    vote_flags = data.get('skip-vote', {})
    vote_node = XML.SubElement(xml_parent, 'skipVote')
    for outcome, tag in (('successful', 'onSuccessful'),
                         ('failed', 'onFailed'),
                         ('unstable', 'onUnstable'),
                         ('notbuilt', 'onNotBuilt')):
        enabled = bool(vote_flags.get(outcome, False))
        XML.SubElement(vote_node, tag).text = 'true' if enabled else 'false'
def gerrit(parser, xml_parent, data):
"""yaml: gerrit
Trigger on a Gerrit event.
Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>`
version >= 2.6.0.
:arg list trigger-on: Events to react on. Please use either the new
**trigger-on**, or the old **trigger-on-*** events definitions. You
cannot use both at once.
.. _trigger_on:
:Trigger on:
* **patchset-created-event** (`dict`) -- Trigger upon patchset
creation.
:Patchset created:
* **exclude-drafts** (`bool`) -- exclude drafts (Default: False)
* **exclude-trivial-rebase** (`bool`) -- exclude trivial rebase
(Default: False)
* **exclude-no-code-change** (`bool`) -- exclude no code change
(Default: False)
Exclude drafts|trivial-rebase|no-code-change needs
Gerrit Trigger v2.12.0
* **patchset-uploaded-event** -- Trigger upon patchset creation
(this is a alias for `patchset-created-event`).
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
* **change-abandoned-event** -- Trigger on patchset abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0.
* **change-merged-event** -- Trigger on change merged
* **change-restored-event** -- Trigger on change restored. Requires
Gerrit Trigger Plugin version >= 2.8.0
* **draft-published-event** -- Trigger on draft published event.
* **ref-updated-event** -- Trigger on ref-updated.
* **comment-added-event** (`dict`) -- Trigger on comment added.
:Comment added:
* **approval-category** (`str`) -- Approval (verdict) category
(for example 'APRV', 'CRVW', 'VRIF' -- see `Gerrit access
control
<http://gerrit.googlecode.com/svn/documentation/2.1/
access-control.html#categories>`_
* **approval-value** -- Approval value for the comment added.
* **comment-added-contains-event** (`dict`) -- Trigger on comment
added contains Regular Expression.
:Comment added contains:
* **comment-contains-value** (`str`) -- Comment contains
Regular Expression value.
:arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload.
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-abandoned-event: Trigger on change abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-merged-event: Trigger on change merged
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-restored-event: Trigger on change restored.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-comment-added-event: Trigger on comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-draft-published-event: Trigger on draft published
event
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-ref-updated-event: Trigger on ref-updated
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg str trigger-approval-category: Approval category for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg int trigger-approval-value: Approval value for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool override-votes: Override default vote values
:arg int gerrit-build-started-verified-value: Started ''Verified'' value
:arg int gerrit-build-successful-verified-value: Successful ''Verified''
value
:arg int gerrit-build-failed-verified-value: Failed ''Verified'' value
:arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value
:arg int gerrit-build-notbuilt-verified-value: Not built ''Verified''
value
:arg int gerrit-build-started-codereview-value: Started ''CodeReview''
value
:arg int gerrit-build-successful-codereview-value: Successful
''CodeReview'' value
:arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value
:arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview''
value
:arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview''
value
:arg str failure-message: Message to leave on failure (default '')
:arg str successful-message: Message to leave on success (default '')
:arg str unstable-message: Message to leave when unstable (default '')
:arg str notbuilt-message: Message to leave when not built (default '')
:arg str failure-message-file: Sets the filename within the workspace from
which to retrieve the unsuccessful review message. (optional)
:arg list projects: list of projects to match
:Project: * **project-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP''
* **project-pattern** (`str`) -- Project name pattern to match
* **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP'' (not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
* **branch-pattern** (`str`) -- Branch name pattern to match
(not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
.. _branches:
* **branches** (`list`) -- List of branches to match
(optional)
:Branch: * **branch-compare-type** (`str`) -- ''PLAIN'',
''ANT'' or ''REG_EXP'' (optional) (default
''PLAIN'')
* **branch-pattern** (`str`) -- Branch name pattern
to match
* **file-paths** (`list`) -- List of file paths to match
(optional)
:File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
or ''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- File path pattern to
match
* **forbidden-file-paths** (`list`) -- List of file paths to
skip triggering (optional)
:Forbidden File Path: * **compare-type** (`str`) --
''PLAIN'', ''ANT'' or ''REG_EXP'' (optional)
(default ''PLAIN'')
* **pattern** (`str`) -- File path pattern to
match
* **topics** (`list`) -- List of topics to match
(optional)
:File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
or ''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- Topic name pattern to
match
:arg dict skip-vote: map of build outcomes for which Jenkins must skip
vote. Requires Gerrit Trigger Plugin version >= 2.7.0
:Outcome: * **successful** (`bool`)
* **failed** (`bool`)
* **unstable** (`bool`)
* **notbuilt** (`bool`)
:arg bool silent: When silent mode is on there will be no communication
back to Gerrit, i.e. no build started/failed/successful approve
messages etc. If other non-silent jobs are triggered by the same
Gerrit event as this job, the result of this job's build will not be
counted in the end result of the other jobs. (default false)
:arg bool silent-start: Sets silent start mode to on or off. When silent
start mode is on there will be no 'build started' messages sent back
to Gerrit. (default false)
:arg bool escape-quotes: escape quotes in the values of Gerrit change
parameters (default true)
:arg bool no-name-and-email: Do not pass compound 'name and email'
parameters (default false)
:arg bool readable-message: If parameters regarding multiline text,
e.g. commit message, should be as human readable or not. If false,
those parameters are Base64 encoded to keep environment variables
clean. (default false)
:arg str dependency-jobs: All jobs on which this job depends. If a commit
should trigger both a dependency and this job, the dependency will be
built first. Use commas to separate job names. Beware of cyclic
dependencies. (optional)
:arg str notification-level: Defines to whom email notifications should be
sent. This can either be nobody ('NONE'), the change owner ('OWNER'),
reviewers and change owner ('OWNER_REVIEWERS'), all interested users
i.e. owning, reviewing, watching, and starring ('ALL') or server
default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT')
:arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger
(default false)
:arg str dynamic-trigger-url: if you specify this option, the Gerrit
trigger configuration will be fetched from there on a regular interval
:arg bool trigger-for-unreviewed-patches: trigger patchset-created events
for changes that were uploaded while connection to Gerrit was down
(default false). Requires Gerrit Trigger Plugin version >= 2.11.0
:arg str custom-url: Custom URL for a message sent to Gerrit. Build
details URL will be used if empty. (default '')
:arg str server-name: Name of the server to trigger on, or ''__ANY__'' to
trigger on any configured Gerrit server (default '__ANY__'). Requires
Gerrit Trigger Plugin version >= 2.11.0
You may select one or more Gerrit events upon which to trigger.
You must also supply at least one project and branch, optionally
more. If you select the comment-added trigger, you should also
indicate which approval category and value you want to trigger the
job.
Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to
configure Gerrit Trigger Plugin, instead of hyphenated-keys. While still
supported, camedCase keys are deprecated and should not be used. Support
for this will be removed after 1.0.0 is released.
Example:
.. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml
:language: yaml
"""
def get_compare_type(xml_tag, compare_type):
    """Validate a Gerrit Trigger comparison strategy and return it.

    :arg str xml_tag: name of the attribute being validated, used in the
        error report
    :arg str compare_type: value supplied by the user
    :returns: *compare_type* unchanged when valid
    :raises InvalidAttributeError: when the value is not a comparison
        strategy understood by the Gerrit Trigger plugin
    """
    allowed = ['PLAIN', 'ANT', 'REG_EXP']
    if compare_type in allowed:
        return compare_type
    raise InvalidAttributeError(xml_tag, compare_type, allowed)
gerrit_handle_legacy_configuration(data)
projects = data['projects']
gtrig = XML.SubElement(xml_parent,
'com.sonyericsson.hudson.plugins.gerrit.trigger.'
'hudsontrigger.GerritTrigger')
XML.SubElement(gtrig, 'spec')
gprojects = XML.SubElement(gtrig, 'gerritProjects')
for project in projects:
gproj = XML.SubElement(gprojects,
'com.sonyericsson.hudson.plugins.gerrit.'
'trigger.hudsontrigger.data.GerritProject')
XML.SubElement(gproj, 'compareType').text = get_compare_type(
'project-compare-type', project['project-compare-type'])
XML.SubElement(gproj, 'pattern').text = project['project-pattern']
branches = XML.SubElement(gproj, 'branches')
project_branches = project.get('branches', [])
if 'branch-compare-type' in project and 'branch-pattern' in project:
warning = 'branch-compare-type and branch-pattern at project ' \
'level are deprecated and support will be removed ' \
'in a later version of Jenkins Job Builder; '
if project_branches:
warning += 'discarding values and using values from ' \
'branches section'
else:
warning += 'please use branches section instead'
logger.warn(warning)
if not project_branches:
project_branches = [
{'branch-compare-type': project['branch-compare-type'],
'branch-pattern': project['branch-pattern']}]
for branch in project_branches:
gbranch = XML.SubElement(
branches, 'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.Branch')
XML.SubElement(gbranch, 'compareType').text = get_compare_type(
'branch-compare-type', branch['branch-compare-type'])
XML.SubElement(gbranch, 'pattern').text = branch['branch-pattern']
project_file_paths = project.get('file-paths', [])
if project_file_paths:
fps_tag = XML.SubElement(gproj, 'filePaths')
for file_path in project_file_paths:
fp_tag = XML.SubElement(fps_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'FilePath')
XML.SubElement(fp_tag, 'compareType').text = get_compare_type(
'compare-type', file_path.get('compare-type', 'PLAIN'))
XML.SubElement(fp_tag, 'pattern').text = file_path['pattern']
project_forbidden_file_paths = project.get('forbidden-file-paths', [])
if project_forbidden_file_paths:
ffps_tag = XML.SubElement(gproj, 'forbiddenFilePaths')
for forbidden_file_path in project_forbidden_file_paths:
ffp_tag = XML.SubElement(ffps_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'FilePath')
XML.SubElement(ffp_tag, 'compareType').text = get_compare_type(
'compare-type', forbidden_file_path.get('compare-type',
'PLAIN'))
XML.SubElement(ffp_tag, 'pattern').text = \
forbidden_file_path['pattern']
topics = project.get('topics', [])
if topics:
topics_tag = XML.SubElement(gproj, 'topics')
for topic in topics:
topic_tag = XML.SubElement(topics_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'Topic')
XML.SubElement(topic_tag, 'compareType').text = \
get_compare_type('compare-type', topic.get('compare-type',
'PLAIN'))
XML.SubElement(topic_tag, 'pattern').text = topic['pattern']
build_gerrit_skip_votes(gtrig, data)
XML.SubElement(gtrig, 'silentMode').text = str(
data.get('silent', False)).lower()
XML.SubElement(gtrig, 'silentStartMode').text = str(
data.get('silent-start', False)).lower()
XML.SubElement(gtrig, 'escapeQuotes').text = str(
data.get('escape-quotes', True)).lower()
XML.SubElement(gtrig, 'noNameAndEmailParameters').text = str(
data.get('no-name-and-email', False)).lower()
XML.SubElement(gtrig, 'readableMessage').text = str(
data.get('readable-message', False)).lower()
XML.SubElement(gtrig, 'dependencyJobsNames').text = str(
data.get('dependency-jobs', ''))
notification_levels = ['NONE', 'OWNER', 'OWNER_REVIEWERS', 'ALL',
'SERVER_DEFAULT']
notification_level = data.get('notification-level', 'SERVER_DEFAULT')
if notification_level not in notification_levels:
raise InvalidAttributeError('notification-level', notification_level,
notification_levels)
if notification_level == 'SERVER_DEFAULT':
XML.SubElement(gtrig, 'notificationLevel').text = ''
else:
XML.SubElement(gtrig, 'notificationLevel').text = notification_level
XML.SubElement(gtrig, 'dynamicTriggerConfiguration').text = str(
data.get('dynamic-trigger-enabled', False))
XML.SubElement(gtrig, 'triggerConfigURL').text = str(
data.get('dynamic-trigger-url', ''))
XML.SubElement(gtrig, 'allowTriggeringUnreviewedPatches').text = str(
data.get('trigger-for-unreviewed-patches', False)).lower()
build_gerrit_triggers(gtrig, data)
override = str(data.get('override-votes', False)).lower()
if override == 'true':
for yamlkey, xmlkey in [('gerrit-build-started-verified-value',
'gerritBuildStartedVerifiedValue'),
('gerrit-build-successful-verified-value',
'gerritBuildSuccessfulVerifiedValue'),
('gerrit-build-failed-verified-value',
'gerritBuildFailedVerifiedValue'),
('gerrit-build-unstable-verified-value',
'gerritBuildUnstableVerifiedValue'),
('gerrit-build-notbuilt-verified-value',
'gerritBuildNotBuiltVerifiedValue'),
('gerrit-build-started-codereview-value',
'gerritBuildStartedCodeReviewValue'),
('gerrit-build-successful-codereview-value',
'gerritBuildSuccessfulCodeReviewValue'),
('gerrit-build-failed-codereview-value',
'gerritBuildFailedCodeReviewValue'),
('gerrit-build-unstable-codereview-value',
'gerritBuildUnstableCodeReviewValue'),
('gerrit-build-notbuilt-codereview-value',
'gerritBuildNotBuiltCodeReviewValue')]:
if data.get(yamlkey) is not None:
# str(int(x)) makes input values like '+1' work
XML.SubElement(gtrig, xmlkey).text = str(
int(data.get(yamlkey)))
XML.SubElement(gtrig, 'buildStartMessage').text = str(
data.get('start-message', ''))
XML.SubElement(gtrig, 'buildFailureMessage').text = \
data.get('failure-message', '')
XML.SubElement(gtrig, 'buildSuccessfulMessage').text = str(
data.get('successful-message', ''))
XML.SubElement(gtrig, 'buildUnstableMessage').text = str(
data.get('unstable-message', ''))
XML.SubElement(gtrig, 'buildNotBuiltMessage').text = str(
data.get('notbuilt-message', ''))
XML.SubElement(gtrig, 'buildUnsuccessfulFilepath').text = str(
data.get('failure-message-file', ''))
XML.SubElement(gtrig, 'customUrl').text = str(data.get('custom-url', ''))
XML.SubElement(gtrig, 'serverName').text = str(
data.get('server-name', '__ANY__'))
def pollscm(parser, xml_parent, data):
    """yaml: pollscm
    Poll the SCM to determine if there has been a change.

    :Parameter: the polling interval (cron syntax)

    .. deprecated:: 1.3.0. Please use :ref:`cron <cron>`.

    .. _cron:

    :arg string cron: the polling interval (cron syntax, required)
    :arg bool ignore-post-commit-hooks: Ignore changes notified by SCM
        post-commit hooks. The subversion-plugin supports this since
        version 1.44. (default false)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/pollscm002.yaml
       :language: yaml
    """
    try:
        # New-style mapping form: 'cron' is mandatory.
        spec = data['cron']
        ignore_hooks = str(
            data.get('ignore-post-commit-hooks', False)).lower()
    except KeyError as missing:
        # Ensure a specific error on the attribute not being set is raised
        # for the new format.
        raise MissingAttributeError(missing)
    except TypeError:
        # Old-style scalar form: the value itself is the cron expression.
        logger.warn("Your pollscm usage is deprecated, please use"
                    " the syntax described in the documentation"
                    " instead")
        spec = data
        ignore_hooks = 'false'

    if not spec:
        raise InvalidAttributeError('cron', spec)

    trigger = XML.SubElement(xml_parent, 'hudson.triggers.SCMTrigger')
    XML.SubElement(trigger, 'spec').text = spec
    XML.SubElement(trigger, 'ignorePostCommitHooks').text = ignore_hooks
def build_pollurl_content_type(xml_parent, entries, prefix,
                               collection_name, element_name):
    """Append a URLTrigger content-type element under *xml_parent*.

    :arg xml_parent: parent XML element for the new ``...ContentType`` node
    :arg entries: values to record; when falsy only the bare content-type
        element is emitted
    :arg str prefix: plugin class-name prefix, e.g. ``JSON`` or ``TEXT``
    :arg str collection_name: tag wrapping the list of entries
    :arg str element_name: tag holding each individual entry value
    """
    ns = 'org.jenkinsci.plugins.urltrigger.content'
    ctype = XML.SubElement(
        xml_parent, '{0}.{1}ContentType'.format(ns, prefix))
    if not entries:
        return
    collection = XML.SubElement(ctype, collection_name)
    for value in entries:
        item = XML.SubElement(
            collection, '{0}.{1}ContentEntry'.format(ns, prefix))
        XML.SubElement(item, element_name).text = value
def pollurl(parser, xml_parent, data):
    """yaml: pollurl
    Trigger when the HTTP response from a URL changes.
    Requires the Jenkins :jenkins-wiki:`URLTrigger Plugin <URLTrigger+Plugin>`.

    :arg string cron: cron syntax of when to run (default '')
    :arg string polling-node: Restrict where the polling should run.
        (optional)
    :arg list urls: List of URLs to monitor

        :URL: * **url** (`str`) -- URL to monitor for changes (required)
              * **proxy** (`bool`) -- Activate the Jenkins proxy (default false)
              * **timeout** (`int`) -- Connect/read timeout in seconds
                (default 300)
              * **username** (`string`) -- User name for basic authentication
                (optional)
              * **password** (`string`) -- Password for basic authentication
                (optional)
              * **check-status** (`int`) -- Check for a specific HTTP status
                code (optional)
              * **check-etag** (`bool`) -- Check the HTTP ETag for changes
                (default false)
              * **check-date** (`bool`) -- Check the last modification date of
                the URL (default false)
              * **check-content** (`list`) -- List of content type changes to
                monitor

                :Content Type: * **simple** (`bool`) -- Trigger on any change
                                 to the content of the URL (default false)
                               * **json** (`list`) -- Trigger on any change to
                                 the listed JSON paths
                               * **text** (`list`) -- Trigger on any change to
                                 the listed regular expressions
                               * **xml** (`list`) -- Trigger on any change to
                                 the listed XPath expressions

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml
    """
    # Maps a YAML content-type key to [tag prefix, collection tag, entry tag,
    # fixed sub-entries].  A fixed value of None means "take the entries from
    # the user's configuration"; 'simple' carries no entries at all.
    valid_content_types = {
        'simple': ['Simple', '', '', []],
        'json': ['JSON', 'jsonPaths', 'jsonPath', None],
        'text': ['TEXT', 'regExElements', 'regEx', None],
        'xml': ['XML', 'xPaths', 'xPath', None]
    }
    urltrig = XML.SubElement(xml_parent,
                             'org.jenkinsci.plugins.urltrigger.URLTrigger')
    node = data.get('polling-node')
    XML.SubElement(urltrig, 'spec').text = data.get('cron', '')
    XML.SubElement(urltrig, 'labelRestriction').text = str(bool(node)).lower()
    if node:
        XML.SubElement(urltrig, 'triggerLabel').text = node
    entries = XML.SubElement(urltrig, 'entries')
    urls = data.get('urls', [])
    if not urls:
        raise JenkinsJobsException('At least one url must be provided')
    for url in urls:
        entry = XML.SubElement(entries,
                               'org.jenkinsci.plugins.urltrigger.'
                               'URLTriggerEntry')
        XML.SubElement(entry, 'url').text = url['url']
        XML.SubElement(entry, 'proxyActivated').text = \
            str(url.get('proxy', False)).lower()
        if 'username' in url:
            XML.SubElement(entry, 'username').text = url['username']
        if 'password' in url:
            XML.SubElement(entry, 'password').text = url['password']
        # The plugin always persists both flags; emit the defaults when no
        # explicit status check was requested.
        if 'check-status' in url:
            XML.SubElement(entry, 'checkStatus').text = 'true'
            XML.SubElement(entry, 'statusCode').text = \
                str(url.get('check-status'))
        else:
            XML.SubElement(entry, 'checkStatus').text = 'false'
            XML.SubElement(entry, 'statusCode').text = '200'
        XML.SubElement(entry, 'timeout').text = \
            str(url.get('timeout', 300))
        XML.SubElement(entry, 'checkETag').text = \
            str(url.get('check-etag', False)).lower()
        XML.SubElement(entry, 'checkLastModificationDate').text = \
            str(url.get('check-date', False)).lower()
        check_content = url.get('check-content', [])
        XML.SubElement(entry, 'inspectingContent').text = \
            str(bool(check_content)).lower()
        content_types = XML.SubElement(entry, 'contentTypes')
        # Fix: the original inner loop reused the name 'entry', silently
        # shadowing the URLTriggerEntry element created above.
        for content_spec in check_content:
            type_name = next(iter(content_spec.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException('check-content must be one of : %s'
                                           % ', '.join(valid_content_types.
                                                       keys()))
            content_type = valid_content_types.get(type_name)
            if content_spec[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = content_spec[type_name]
                build_pollurl_content_type(content_types,
                                           sub_entries,
                                           *content_type[0:3])
def timed(parser, xml_parent, data):
    """yaml: timed
    Trigger builds at certain times.

    :Parameter: when to run the job (cron syntax)

    Example::

      triggers:
        - timed: "@midnight"
    """
    timer = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger')
    XML.SubElement(timer, 'spec').text = data
def github(parser, xml_parent, data):
    """yaml: github
    Trigger a job when github repository is pushed to.
    Requires the Jenkins :jenkins-wiki:`GitHub Plugin <GitHub+Plugin>`.

    Example::

      triggers:
        - github
    """
    # The trigger takes no configuration; an empty <spec/> is still required.
    push_trigger = XML.SubElement(xml_parent,
                                  'com.cloudbees.jenkins.GitHubPushTrigger')
    XML.SubElement(push_trigger, 'spec').text = ''
def github_pull_request(parser, xml_parent, data):
    """yaml: github-pull-request
    Build pull requests in github and report results.
    Requires the Jenkins :jenkins-wiki:`GitHub Pull Request Builder Plugin
    <GitHub+pull+request+builder+plugin>`.

    :arg list admin-list: the users with admin rights (optional)
    :arg list white-list: users whose pull requests build (optional)
    :arg list org-list: orgs whose users should be white listed (optional)
    :arg bool allow-whitelist-orgs-as-admins: members of white listed orgs
        will have admin rights. (default false)
    :arg string cron: cron syntax of when to run (optional)
    :arg string trigger-phrase: when filled, commenting this phrase
        in the pull request will trigger a build (optional)
    :arg bool only-trigger-phrase: only commenting the trigger phrase
        in the pull request will trigger a build (default false)
    :arg bool github-hooks: use github hook (default false)
    :arg bool permit-all: build every pull request automatically
        without asking (default false)
    :arg bool auto-close-on-fail: close failed pull request automatically
        (default false)
    :arg list white-list-target-branches: Adding branches to this whitelist
        allows you to selectively test pull requests destined for these
        branches only. Supports regular expressions (e.g. 'master',
        'feature-.*'). (optional)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/github-pull-request.yaml
    """
    ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.'
                                       'GhprbTrigger')

    def flag(key):
        # Serialize a boolean option the way the plugin expects it.
        return str(data.get(key, False)).lower()

    cron = data.get('cron', '')
    # Emission order matters for the generated config.xml, so the fields are
    # written as an ordered table.
    simple_fields = [
        ('spec', cron),
        ('adminlist', "\n".join(data.get('admin-list', []))),
        ('allowMembersOfWhitelistedOrgsAsAdmin',
         flag('allow-whitelist-orgs-as-admins')),
        ('whitelist', "\n".join(data.get('white-list', []))),
        ('orgslist', "\n".join(data.get('org-list', []))),
        ('cron', cron),
        ('triggerPhrase', data.get('trigger-phrase', '')),
        ('onlyTriggerPhrase', flag('only-trigger-phrase')),
        ('useGitHubHooks', flag('github-hooks')),
        ('permitAll', flag('permit-all')),
        ('autoCloseFailedPullRequests', flag('auto-close-on-fail')),
    ]
    for tag, value in simple_fields:
        XML.SubElement(ghprb, tag).text = value

    target_branches = data.get('white-list-target-branches', [])
    if target_branches:
        wltb = XML.SubElement(ghprb, 'whiteListTargetBranches')
        for branch in target_branches:
            branch_elem = XML.SubElement(wltb, 'org.jenkinsci.plugins.'
                                               'ghprb.GhprbBranch')
            XML.SubElement(branch_elem, 'branch').text = str(branch)
def gitlab_merge_request(parser, xml_parent, data):
    """yaml: gitlab-merge-request
    Build merge requests in gitlab and report results.
    Requires the Jenkins :jenkins-wiki:`Gitlab MergeRequest Builder Plugin.
    <Gitlab+Merge+Request+Builder+Plugin>`.

    :arg string cron: cron syntax of when to run (required)
    :arg string project-path: gitlab-relative path to project (required)

    Example:

    .. literalinclude:: \
    /../../tests/triggers/fixtures/gitlab-merge-request.yaml
    """
    trigger = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.'
                                         'GitlabBuildTrigger')
    cron = data.get('cron', None)
    if not cron:
        raise jenkins_jobs.errors.JenkinsJobsException(
            'gitlab-merge-request is missing "cron"')
    project_path = data.get('project-path', None)
    if not project_path:
        raise jenkins_jobs.errors.JenkinsJobsException(
            'gitlab-merge-request is missing "project-path"')
    # Because of a design limitation in the GitlabBuildTrigger Jenkins plugin
    # both 'spec' and '__cron' have to be set to the same value to have them
    # take effect. Also, cron and projectPath are prefixed with underscores
    # in the plugin, but spec is not.
    XML.SubElement(trigger, 'spec').text = cron
    XML.SubElement(trigger, '__cron').text = cron
    XML.SubElement(trigger, '__projectPath').text = project_path
def build_result(parser, xml_parent, data):
    """yaml: build-result
    Configure jobB to monitor jobA build result. A build is scheduled if there
    is a new build result that matches your criteria (unstable, failure, ...).
    Requires the Jenkins :jenkins-wiki:`BuildResultTrigger Plugin
    <BuildResultTrigger+Plugin>`.

    :arg list groups: List groups of jobs and results to monitor for
    :arg list jobs: The jobs to monitor (required)
    :arg list results: Build results to monitor for (default success)
    :arg bool combine: Combine all job information.  A build will be
        scheduled only if all conditions are met (default false)
    :arg str cron: The cron syntax with which to poll the jobs for the
        supplied result (default '')

    Example::

      triggers:
        - build-result:
            combine: true
            cron: '* * * * *'
            groups:
              - jobs:
                  - foo
                  - example
                results:
                  - unstable
              - jobs:
                  - foo2
                results:
                  - not-built
                  - aborted
    """
    brt = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.'
                                     'buildresulttrigger.BuildResultTrigger')
    XML.SubElement(brt, 'spec').text = data.get('cron', '')
    XML.SubElement(brt, 'combinedJobs').text = str(
        data.get('combine', False)).lower()
    jobs_info = XML.SubElement(brt, 'jobsInfo')
    # YAML-level result names -> plugin result identifiers.
    outcome_names = {'success': 'SUCCESS',
                     'unstable': 'UNSTABLE',
                     'failure': 'FAILURE',
                     'not-built': 'NOT_BUILT',
                     'aborted': 'ABORTED'}
    for group in data['groups']:
        info = XML.SubElement(jobs_info, 'org.jenkinsci.plugins.'
                                         'buildresulttrigger.model.'
                                         'BuildResultTriggerInfo')
        watched_jobs = group.get('jobs', [])
        if not watched_jobs:
            raise jenkins_jobs.errors.\
                JenkinsJobsException('Jobs is missing and a required'
                                     ' element')
        XML.SubElement(info, 'jobNames').text = ",".join(watched_jobs)
        checked_results = XML.SubElement(info, 'checkedResults')
        for outcome in group.get('results', ['success']):
            if outcome not in outcome_names:
                raise jenkins_jobs.errors.\
                    JenkinsJobsException('Result entered is not valid,'
                                         ' must be one of: '
                                         + ', '.join(outcome_names.keys()))
            checked = XML.SubElement(checked_results, 'org.jenkinsci.'
                                     'plugins.buildresulttrigger.model.'
                                     'CheckedResult')
            XML.SubElement(checked, 'checked').text = outcome_names[outcome]
def reverse(parser, xml_parent, data):
    """yaml: reverse
    This trigger can be configured in the UI using the checkbox with the
    following text: 'Build after other projects are built'.

    Set up a trigger so that when some other projects finish building, a new
    build is scheduled for this project. This is convenient for running an
    extensive test after a build is complete, for example.

    This configuration complements the "Build other projects" section in the
    "Post-build Actions" of an upstream project, but is preferable when you
    want to configure the downstream project.

    :arg str jobs: List of jobs to watch. Can be either a comma separated
        list or a list.
    :arg str result: Build results to monitor for between the following
        options: success, unstable and failure. (default 'success').

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml

    Example List:

    .. literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml
    """
    reserveBuildTrigger = XML.SubElement(
        xml_parent, 'jenkins.triggers.ReverseBuildTrigger')

    supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']

    XML.SubElement(reserveBuildTrigger, 'spec').text = ''

    jobs = data.get('jobs')
    if isinstance(jobs, list):
        jobs = ",".join(jobs)
    XML.SubElement(reserveBuildTrigger, 'upstreamProjects').text = \
        jobs

    threshold = XML.SubElement(reserveBuildTrigger, 'threshold')
    # Fix: the docstring promises a 'success' default, but the original
    # called .upper() on None (AttributeError) when 'result' was omitted.
    result = data.get('result', 'success').upper()
    if result not in supported_thresholds:
        raise jenkins_jobs.errors.JenkinsJobsException(
            "Choice should be one of the following options: %s." %
            ", ".join(supported_thresholds))
    XML.SubElement(threshold, 'name').text = \
        hudson_model.THRESHOLDS[result]['name']
    XML.SubElement(threshold, 'ordinal').text = \
        hudson_model.THRESHOLDS[result]['ordinal']
    XML.SubElement(threshold, 'color').text = \
        hudson_model.THRESHOLDS[result]['color']
    XML.SubElement(threshold, 'completeBuild').text = \
        str(hudson_model.THRESHOLDS[result]['complete']).lower()
def monitor_folders(parser, xml_parent, data):
    """yaml: monitor-folders
    Configure Jenkins to monitor folders.
    Requires the Jenkins :jenkins-wiki:`Filesystem Trigger Plugin
    <FSTriggerPlugin>`.

    :arg str path: Folder path to poll. (optional)
    :arg list includes: Fileset includes setting that specifies the list of
        includes files. Basedir of the fileset is relative to the workspace
        root. If no value is set, all files are used. (optional)
    :arg str excludes: The 'excludes' pattern. A file that matches this mask
        will not be polled even if it matches the mask specified in 'includes'
        section. (optional)
    :arg bool check-modification-date: Check last modification date.
        (default true)
    :arg bool check-content: Check content. (default true)
    :arg bool check-fewer: Check fewer or more files (default true)
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/monitor_folders.yaml
    """
    trigger = XML.SubElement(xml_parent,
                             ('org.jenkinsci.plugins.fstrigger.'
                              'triggers.FolderContentTrigger'))
    folder = data.get('path')
    if folder:
        XML.SubElement(trigger, 'path').text = folder
    include_list = data.get('includes')
    if include_list:
        XML.SubElement(trigger, 'includes').text = ",".join(include_list)
    exclude_mask = data.get('excludes')
    if exclude_mask:
        XML.SubElement(trigger, 'excludes').text = exclude_mask
    XML.SubElement(trigger, 'spec').text = data.get('cron', '')
    # The plugin persists the *inverse* of each check flag ("exclude..."),
    # hence the negation of the YAML values below.
    for tag, key in (('excludeCheckLastModificationDate',
                      'check-modification-date'),
                     ('excludeCheckContent', 'check-content'),
                     ('excludeCheckFewerOrMoreFiles', 'check-fewer')):
        XML.SubElement(trigger, tag).text = str(
            not data.get(key, True)).lower()
def ivy(parser, xml_parent, data):
    """yaml: ivy
    Poll with an Ivy script
    Requires the Jenkins :jenkins-wiki:`IvyTrigger Plugin
    <IvyTrigger+Plugin>`.

    :arg str path: Path of the ivy file. (optional)
    :arg str settings-path: Ivy Settings Path. (optional)
    :arg list str properties-file: List of properties file path. Properties
        will be injected as variables in the ivy settings file. (optional)
    :arg str properties-content: Properties content. Properties will be
        injected as variables in the ivy settings file. (optional)
    :arg bool debug: Active debug mode on artifacts resolution.
        (default false)
    :arg download-artifacts: Download artifacts for dependencies to see if
        they have changed. (default true)
    :arg bool enable-concurrent: Enable Concurrent Build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/ivy.yaml
    """
    it = XML.SubElement(xml_parent,
                        'org.jenkinsci.plugins.ivytrigger.IvyTrigger')
    # (yaml-key, xml-tag, default); a resolved value of None means the
    # element is omitted from the output entirely.
    mappings = [('path', 'ivyPath', None),
                ('settings-path', 'ivySettingsPath', None),
                ('properties-file', 'propertiesFilePath', None),
                ('properties-content', 'propertiesContent', None),
                ('debug', 'debug', False),
                ('download-artifacts', 'downloadArtifacts', True),
                ('enable-concurrent', 'enableConcurrentBuild', False),
                ('cron', 'spec', '')]
    # Fix: unpack the triples directly instead of the redundant prop[:3]
    # slice, and use isinstance() rather than type() comparisons.
    for opt, xmlopt, default_val in mappings:
        val = data.get(opt, default_val)
        if val is None:
            continue
        if isinstance(val, bool):
            val = str(val).lower()
        elif isinstance(val, list):
            # Multiple properties files are joined with ';' as the plugin
            # expects.
            val = ";".join(val)
        XML.SubElement(it, xmlopt).text = val
    label = data.get('label')
    XML.SubElement(it, 'labelRestriction').text = str(bool(label)).lower()
    if label:
        XML.SubElement(it, 'triggerLabel').text = label
def script(parser, xml_parent, data):
    """yaml: script
    Triggers the job using shell or batch script.
    Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
    <ScriptTrigger+Plugin>`.

    :arg str label: Restrict where the polling should run. (default '')
    :arg str script: A shell or batch script. (default '')
    :arg str script-file-path: A shell or batch script path. (default '')
    :arg str cron: cron syntax of when to run (default '')
    :arg bool enable-concurrent: Enables triggering concurrent builds.
        (default false)
    :arg int exit-code: If the exit code of the script execution returns this
        expected exit code, a build is scheduled. (default 0)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/script.yaml
    """
    data = data or {}
    trigger = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.scripttrigger.ScriptTrigger'
    )
    node_label = data.get('label')
    XML.SubElement(trigger, 'script').text = str(data.get('script', ''))
    XML.SubElement(trigger, 'scriptFilePath').text = str(
        data.get('script-file-path', ''))
    XML.SubElement(trigger, 'spec').text = str(data.get('cron', ''))
    XML.SubElement(trigger, 'labelRestriction').text = str(
        bool(node_label)).lower()
    if node_label:
        XML.SubElement(trigger, 'triggerLabel').text = node_label
    XML.SubElement(trigger, 'enableConcurrentBuild').text = str(
        data.get('enable-concurrent', False)).lower()
    XML.SubElement(trigger, 'exitCode').text = str(data.get('exit-code', 0))
def gitlab_push(parser, xml_parent, data):
    """yaml: gitlab-push
    Build when GitLab sends a push / merge-request web hook.
    Requires the Jenkins GitLab Plugin.

    :arg bool triggerOnPush: trigger on push events (default false)
    :arg bool triggerOnMergeRequest: trigger on merge-request events
        (default false)
    :arg str triggerOpenMergeRequestOnPush: rebuild open merge requests on
        push (default 'never')
    :arg bool ciSkip: honour the '[ci-skip]' commit-message marker
        (default false)
    :arg bool setBuildDescription: set the build description from the hook
        payload (default false)
    :arg bool addNoteOnMergeRequest: add a note on the merge request
        (default false)
    :arg bool addVoteOnMergeRequest: add a vote on the merge request
        (default false)
    :arg bool allowAllBranches: allow any branch to trigger (default false)
    :arg list includeBranchesSpec: branches that may trigger
        (default ['master'])
    :arg list excludeBranchesSpec: branches that never trigger (default [])
    """
    data = data if data else {}
    glt = XML.SubElement(
        xml_parent,
        'com.dabsquared.gitlabjenkins.GitLabPushTrigger'
    )
    glt.set('plugin', 'gitlab-plugin@1.1.26')
    XML.SubElement(glt, 'spec')
    # Fix: the original reused the variable 'triggerOpenMergeRequestOnPush'
    # for the unrelated 'ciSkip' element; a table keeps tag and option
    # aligned by construction.  Element order is preserved.
    simple_options = [('triggerOnPush', False),
                      ('triggerOnMergeRequest', False),
                      ('triggerOpenMergeRequestOnPush', 'never'),
                      ('ciSkip', False),
                      ('setBuildDescription', False),
                      ('addNoteOnMergeRequest', False),
                      ('addVoteOnMergeRequest', False),
                      ('allowAllBranches', False)]
    for tag, default in simple_options:
        XML.SubElement(glt, tag).text = str(data.get(tag, default)).lower()
    XML.SubElement(glt, 'includeBranchesSpec').text = ','.join(
        data.get('includeBranchesSpec', ['master']))
    XML.SubElement(glt, 'excludeBranchesSpec').text = ','.join(
        data.get('excludeBranchesSpec', []))
def groovy_script(parser, xml_parent, data):
    """yaml: groovy-script
    Triggers the job using a groovy script.
    Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
    <ScriptTrigger+Plugin>`.

    :arg bool system-script: If true, run the groovy script as a system
        script, the script will have access to the same variables as the
        Groovy Console. If false, run the groovy script on the executor node,
        the script will not have access to the hudson or job model.
        (default false)
    :arg str script: Content of the groovy script. If the script result is
        evaluated to true, a build is scheduled. (default '')
    :arg str script-file-path: Groovy script path. (default '')
    :arg str property-file-path: Property file path. All properties will be
        set as parameters for the triggered build. (optional)
    :arg bool enable-concurrent: Enable concurrent build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/groovy-script.yaml
    """
    trigger = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger'
    )
    XML.SubElement(trigger, 'groovySystemScript').text = str(
        data.get('system-script', False)).lower()
    XML.SubElement(trigger, 'groovyExpression').text = str(
        data.get('script', ''))
    XML.SubElement(trigger, 'groovyFilePath').text = str(
        data.get('script-file-path', ''))
    # The properties-file element is only emitted when explicitly configured.
    if 'property-file-path' in data:
        XML.SubElement(trigger, 'propertiesFilePath').text = str(
            data['property-file-path'])
    XML.SubElement(trigger, 'enableConcurrentBuild').text = str(
        data.get('enable-concurrent', False)).lower()
    node_label = data.get('label')
    XML.SubElement(trigger, 'labelRestriction').text = str(
        bool(node_label)).lower()
    if node_label:
        XML.SubElement(trigger, 'triggerLabel').text = node_label
    XML.SubElement(trigger, 'spec').text = str(data.get('cron', ''))
class Triggers(jenkins_jobs.modules.base.Base):
    """Jenkins Job Builder module that renders the ``triggers`` section."""
    sequence = 50

    component_type = 'trigger'
    component_list_type = 'triggers'

    def gen_xml(self, parser, xml_parent, data):
        # Emit nothing when no triggers are configured for the job.
        configured = data.get('triggers', [])
        if not configured:
            return

        container = XML.SubElement(xml_parent, 'triggers',
                                   {'class': 'vector'})
        for component in configured:
            self.registry.dispatch('trigger', parser, container, component)
| {
"content_hash": "70a21ac1fa7edc8fa81079a092bf5563",
"timestamp": "",
"source": "github",
"line_count": 1220,
"max_line_length": 104,
"avg_line_length": 44.5516393442623,
"alnum_prop": 0.5989733777344397,
"repo_name": "sebbrandt87/jenkins-job-builder",
"id": "06d6d23eaec6d07c10d82d58cf3ada4c35ddd18c",
"size": "54956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jenkins_jobs/modules/triggers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "51"
},
{
"name": "C++",
"bytes": "791"
},
{
"name": "Python",
"bytes": "731817"
},
{
"name": "Shell",
"bytes": "916"
},
{
"name": "SourcePawn",
"bytes": "16"
}
],
"symlink_target": ""
} |
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import gzip
import os
import tempfile
import zipfile
import requests
def get_file(source_url, target_filename):
    '''Download source_url to target_filename (unless already present) and
    unpack .zip / .gz archives, returning the path of the usable file.'''
    if not os.path.isfile(target_filename):
        target_dir = os.path.dirname(target_filename)
        # Guard the empty dirname of a bare filename: makedirs('') raises.
        if target_dir and not os.path.exists(target_dir):
            os.makedirs(target_dir)

        resp = requests.get(source_url, allow_redirects=True)
        # Fix: file objects have no write_bytes() (that is a pathlib.Path
        # method) and the response payload is binary, so open in 'wb' mode
        # and use write().
        with open(target_filename, 'wb') as target_file:
            target_file.write(resp.content)

    destination = os.path.dirname(target_filename)

    if target_filename.endswith('.zip'):
        with zipfile.ZipFile(target_filename, 'r') as zfile:
            # Return the first archive member; assumes single-file archives
            # — TODO confirm against callers.
            target_filename = os.path.join(destination, zfile.namelist()[0])
            zfile.extractall(destination)
    elif target_filename.endswith('.gz'):
        unzipped_filepath = target_filename[:-len('.gz')]

        if os.path.exists(unzipped_filepath):
            # Already decompressed on a previous run.
            target_filename = unzipped_filepath
        else:
            with gzip.open(target_filename, 'rb') as input_file:
                # NOTE(review): input_file.name is the full .gz path, so the
                # join collapses to that path minus '.gz' on absolute inputs.
                target_filename = os.path.join(
                    destination, input_file.name[:-len('.gz')])
                with open(target_filename, 'wb') as output_file:
                    for line in input_file:
                        output_file.write(line)

    return target_filename
def get_filename(filename):
    '''Return filename unchanged, or the name of a newly created temporary
    file when filename is None.'''
    if filename is not None:
        return filename
    # delete=False so the file survives the handle going out of scope.
    return tempfile.NamedTemporaryFile(delete=False).name
def get_filenames(filepaths, max_files=int(1e16)):
    '''Expand filepaths (files and/or directories) into a flat list of
    filenames, truncated to at most max_files entries.'''
    expanded = []
    for path in filepaths:
        expanded.extend(_get_filenames(path))
    return expanded[:max_files]


def _get_filenames(filepath):
    '''Return a directory's entries, or the path itself for a plain file.'''
    if not os.path.isdir(filepath):
        return [filepath]
    return [os.path.join(filepath, name)
            for name in os.listdir(os.path.abspath(filepath))]
| {
"content_hash": "2172a33be4123a92457938192b12ab34",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 76,
"avg_line_length": 27.435294117647057,
"alnum_prop": 0.6355060034305318,
"repo_name": "synbiochem/synbiochem-py",
"id": "5d6daf02f5bc0970ba5dac79120be55a365cd7b7",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synbiochem/utils/io_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121177"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
import proto # type: ignore
# Registers this module's message(s) with the proto-plus machinery under the
# google.ads.googleads.v11 package/marshal.
__protobuf__ = proto.module(
    package="google.ads.googleads.v11.resources",
    marshal="google.ads.googleads.v11",
    manifest={"KeywordPlanAdGroup",},
)
class KeywordPlanAdGroup(proto.Message):
    r"""A Keyword Planner ad group.
    Max number of keyword plan ad groups per plan: 200.
    Attributes:
        resource_name (str):
            Immutable. The resource name of the Keyword Planner ad
            group. KeywordPlanAdGroup resource names have the form:
            ``customers/{customer_id}/keywordPlanAdGroups/{kp_ad_group_id}``
        keyword_plan_campaign (str):
            The keyword plan campaign to which this ad
            group belongs.
            This field is a member of `oneof`_ ``_keyword_plan_campaign``.
        id (int):
            Output only. The ID of the keyword plan ad
            group.
            This field is a member of `oneof`_ ``_id``.
        name (str):
            The name of the keyword plan ad group.
            This field is required and should not be empty
            when creating keyword plan ad group.
            This field is a member of `oneof`_ ``_name``.
        cpc_bid_micros (int):
            A default ad group max cpc bid in micros in
            account currency for all biddable keywords under
            the keyword plan ad group. If not set, will
            inherit from parent campaign.
            This field is a member of `oneof`_ ``_cpc_bid_micros``.
    """

    # Field numbers are part of the protobuf wire format and must not change.
    resource_name = proto.Field(proto.STRING, number=1,)
    keyword_plan_campaign = proto.Field(proto.STRING, number=6, optional=True,)
    id = proto.Field(proto.INT64, number=7, optional=True,)
    name = proto.Field(proto.STRING, number=8, optional=True,)
    cpc_bid_micros = proto.Field(proto.INT64, number=9, optional=True,)
# Export exactly the names declared in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "ea4464763dbb4818afc305ae1ba9e444",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 35.283018867924525,
"alnum_prop": 0.6219251336898396,
"repo_name": "googleads/google-ads-python",
"id": "8b1caea05431d4d0452dc1804f1d7797c28841f4",
"size": "2470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/resources/types/keyword_plan_ad_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import os
import rollbar
import rollbar.contrib.flask
import structlog
from flask import got_request_exception
import heroku_bouncer
from dynoup import app
from scaler.utils import oauth_callback
import scaler.admin # noqa
logger = structlog.get_logger()
# Wrap the WSGI app with Heroku OAuth ("bouncer") so requests are
# authenticated before they reach the Flask app.
app.wsgi_app = heroku_bouncer.bouncer(app.wsgi_app, scope='write', auth_callback=oauth_callback)
@app.before_first_request
def init_rollbar():
    """Initialize Rollbar error reporting once, before the first request."""
    rollbar.init(
        # access token, read from the Flask app configuration
        app.config['ROLLBAR_ACCESS_TOKEN'],
        # environment name
        'production',
        # server root directory, makes tracebacks prettier
        root=os.path.dirname(os.path.realpath(__file__)),
        # flask already sets up logging
        allow_logging_basic_config=False)
    # send exceptions from `app` to rollbar, using flask's signal system.
    got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
if __name__ == '__main__':
    # Local development entry point: verbose HTTP client logging plus the
    # Flask debug server.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
    app.run(debug=True)
| {
"content_hash": "2213990ffb2500e29f311b046de8be7f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 26.622222222222224,
"alnum_prop": 0.7111853088480802,
"repo_name": "sibson/dynoup",
"id": "8286afbea00eabe698c0057796b4368bbb6b2162",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "39056"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# generic imports
import json
import jsonschema
import logging
# django imports
from django.conf import settings
from django.db import models
# import xml2kvp
from core.xml2kvp import XML2kvp
# Get an instance of a LOGGER
LOGGER = logging.getLogger(__name__)
# Set logging levels for 3rd party modules
logging.getLogger("requests").setLevel(logging.WARNING)
class FieldMapper(models.Model):
    '''
    Model to handle different Field Mappers
    '''
    # Human-readable label for this mapper.
    name = models.CharField(max_length=128, null=True)
    # Mapper payload; its semantics depend on field_mapper_type
    # (presumably an XSL stylesheet or python snippet -- TODO confirm).
    payload = models.TextField(null=True, default=None, blank=True)
    # JSON-encoded XML2kvp configuration, stored as text.
    config_json = models.TextField(null=True, default=None, blank=True)
    # NOTE(review): these choices duplicate get_field_mapper_choices() below
    # but unconditionally include 'python' -- confirm that is intended.
    field_mapper_type = models.CharField(
        max_length=255,
        choices=[
            ('xml2kvp', 'XML to Key/Value Pair (XML2kvp)'),
            ('xslt', 'XSL Stylesheet'),
            ('python', 'Python Code Snippet')]
    )
    def __str__(self):
        return '%s, FieldMapper: #%s' % (self.name, self.id)
    def as_dict(self):
        # Exposes the raw instance __dict__ (includes Django internals such
        # as _state); callers should treat it as read-only.
        return self.__dict__
    @property
    def config(self):
        # Parsed form of config_json, or None when no config is stored.
        if self.config_json:
            return json.loads(self.config_json)
        return None
    def validate_config_json(self, config_json=None):
        # Raises jsonschema.ValidationError (or json decode errors) when the
        # configuration is not valid against the XML2kvp schema.
        # if config_json not provided, assume use self
        if not config_json:
            config_json = self.config_json
        # load config_json as dictionary
        config_dict = json.loads(config_json)
        # validate against XML2kvp schema
        jsonschema.validate(config_dict, XML2kvp.schema)
def get_field_mapper_choices():
    '''Build the selectable mapper types; python only when enabled in settings.'''
    choices = [
        ('xml2kvp', 'XML to Key/Value Pair (XML2kvp)'),
        ('xslt', 'XSL Stylesheet')
    ]
    # The python snippet option is opt-in via the ENABLE_PYTHON setting.
    if getattr(settings, 'ENABLE_PYTHON', 'false') == 'true':
        choices += [('python', 'Python Code Snippet')]
    return choices
| {
"content_hash": "f1fde1c8c7472cb605d6efe287dcc30e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 71,
"avg_line_length": 25.506849315068493,
"alnum_prop": 0.6396348012889366,
"repo_name": "WSULib/combine",
"id": "5127daf792f2e5695aee02561411058bfd4e645b",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/models/field_mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "250903"
},
{
"name": "HTML",
"bytes": "372068"
},
{
"name": "JavaScript",
"bytes": "1926326"
},
{
"name": "Python",
"bytes": "639194"
},
{
"name": "Shell",
"bytes": "435"
},
{
"name": "XSLT",
"bytes": "38438"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the tangible template for the Hutt-gang vest (variant s02)."""
	result = Tangible()
	result.template = "object/tangible/wearables/vest/shared_vest_hutt_gang_s02.iff"
	result.attribute_template_id = 11
	result.stfName("wearables_name","vest_hutt_gang_s02")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result | {
"content_hash": "45ab6bd049b940f3f751b2f135a6da47",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 25,
"alnum_prop": 0.7046153846153846,
"repo_name": "anhstudios/swganh",
"id": "ab38da341804ff1d7037ecb6aacd763f8e875f23",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/wearables/vest/shared_vest_hutt_gang_s02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import unittest
import os
import torch
from dotenv import load_dotenv
import nlpaug.augmenter.sentence as nas
class TestContextualWordEmbsAug(unittest.TestCase):
    """Integration tests for sentence-level contextual word-embedding
    augmentation across several causal LM back-ends (xlnet/gpt2/distilgpt2).
    """
    @classmethod
    def setUpClass(cls):
        # Load credentials/config from the repository-level .env file.
        env_config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', '..', '..', '.env'))
        load_dotenv(env_config_path)
        cls.model_paths = [
            'xlnet-base-cased',
            'gpt2',
            'distilgpt2'
        ]
        cls.text = 'The quick brown fox jumps over the lazy'
        cls.texts = [
            'The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog.',
            "Seeing all of the negative reviews for this movie, I figured that it could be yet another comic masterpiece that wasn't quite meant to be."
        ]
    def test_batch_size(self):
        # Output length must always match input length, whatever batch_size.
        # 1 per batch
        aug = nas.ContextualWordEmbsForSentenceAug(model_path='distilgpt2', batch_size=1)
        aug_data = aug.augment(self.texts)
        self.assertEqual(len(aug_data), len(self.texts))
        # batch size = input size
        aug = nas.ContextualWordEmbsForSentenceAug(model_path='distilgpt2', batch_size=len(self.texts))
        aug_data = aug.augment(self.texts)
        self.assertEqual(len(aug_data), len(self.texts))
        # batch size > input size
        aug = nas.ContextualWordEmbsForSentenceAug(model_path='distilgpt2', batch_size=len(self.texts)+1)
        aug_data = aug.augment(self.texts)
        self.assertEqual(len(aug_data), len(self.texts))
        # input size > batch size
        # aug = nas.ContextualWordEmbsForSentenceAug(model_path='distilgpt2', batch_size=2)
        # aug_data = aug.augment(self.texts * 2)
        # self.assertEqual(len(aug_data), len(self.texts)*2)
    def test_none_device(self):
        # device=None must fall back to CPU.
        for model_path in self.model_paths:
            aug = nas.ContextualWordEmbsForSentenceAug(
                model_path=model_path, force_reload=True, device=None)
            self.assertTrue(aug.device == 'cpu')
    def test_reset_model(self):
        # Re-constructing the augmenter must re-configure the cached model.
        for model_path in self.model_paths:
            original_aug = nas.ContextualWordEmbsForSentenceAug(model_path=model_path, top_p=0.5)
            original_temperature = original_aug.model.temperature
            original_top_k = original_aug.model.top_k
            # original_top_p = original_aug.model.top_p
            new_aug = nas.ContextualWordEmbsForSentenceAug(
                model_path=model_path, temperature=original_temperature+1, top_k=original_top_k+1)
            new_temperature = new_aug.model.temperature
            new_top_k = new_aug.model.top_k
            # new_top_p = new_aug.model.top_p
            self.assertEqual(original_temperature+1, new_temperature)
            self.assertEqual(original_top_k + 1, new_top_k)
            # self.assertEqual(original_top_p + 1, new_top_p)
    def test_by_device(self):
        # Run the device-specific suite on CUDA when available, always on CPU.
        if torch.cuda.is_available():
            self.execute_by_device('cuda')
        self.execute_by_device('cpu')
    def execute_by_device(self, device):
        # Exercise every model on the given device with single and batched input.
        for model_path in self.model_paths:
            aug = nas.ContextualWordEmbsForSentenceAug(model_path=model_path, device=device)
            self.empty_input(aug)
            for data in [self.text, self.texts]:
                self.insert(aug, data)
        self.assertLess(0, len(self.model_paths))
    def empty_input(self, aug):
        # Empty text must yield no augmented output.
        text = ''
        augmented_data = aug.augment(text)
        self.assertTrue(len(augmented_data) == 0)
    def insert(self, aug, data):
        # Sentence augmentation inserts words: output is longer and differs.
        augmented_data = aug.augment(data)
        if isinstance(data, list):
            for d, a in zip(data, augmented_data):
                self.assertLess(len(d.split(' ')), len(a.split(' ')))
                self.assertNotEqual(d, a)
        else:
            augmented_text = augmented_data[0]
            self.assertLess(len(data.split(' ')), len(augmented_text.split(' ')))
            self.assertNotEqual(data, augmented_text)
| {
"content_hash": "accff7c499e0246b271642c3a1f78496",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 152,
"avg_line_length": 38.65384615384615,
"alnum_prop": 0.6159203980099502,
"repo_name": "makcedward/nlpaug",
"id": "4a79646cff5d83c096ea2465baaa3395a1e11303",
"size": "4020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/augmenter/sentence/test_context_word_embs_sentence.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "777279"
},
{
"name": "Python",
"bytes": "512156"
},
{
"name": "Shell",
"bytes": "2004"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
from books_recsys_app.api import UsersList
import books_recsys_app.views
import rest_framework_swagger
'''
#working on django 1.7
urlpatterns = patterns('',
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^$', 'books_recsys_app.views.home', name='home'),
url(r'^auth/', 'books_recsys_app.views.auth', name='auth'),
url(r'^signout/','books_recsys_app.views.signout',name='signout'),
url(r'^rate_movie/','books_recsys_app.views.rate_movie',name='rate_movie'),
url(r'^movies-recs/','books_recsys_app.views.movies_recs',name='movies_recs'),
url(r'^admin/', include(admin.site.urls)),
url(r'^users-list/',UsersList.as_view(),name='users-list')
)
'''
# URL routes for the movie recommendation app (Django >= 1.8 style:
# view callables are passed directly instead of dotted-path strings).
urlpatterns = [
    url(r'^docs/', include('rest_framework_swagger.urls')),
    url(r'^$', books_recsys_app.views.home, name='home'),
    url(r'^auth/', books_recsys_app.views.auth, name='auth'),
    url(r'^signout/',books_recsys_app.views.signout,name='signout'),
    url(r'^rate_movie/',books_recsys_app.views.rate_movie,name='rate_movie'),
    url(r'^movies-recs/',books_recsys_app.views.movies_recs,name='movies_recs'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^users-list/',UsersList.as_view(),name='users-list')
] | {
"content_hash": "7415bc69a218877826bee96187ef5e7a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 46.67857142857143,
"alnum_prop": 0.6732976281560826,
"repo_name": "xianjunzhengbackup/code",
"id": "21598ae105fc1234ab0105829755754804f1a4c2",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data science/machine_learning_for_the_web/chapter_7/server_movierecsys/server_movierecsys/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "113"
},
{
"name": "BlitzBasic",
"bytes": "256"
},
{
"name": "CSS",
"bytes": "49827"
},
{
"name": "HTML",
"bytes": "157006325"
},
{
"name": "JavaScript",
"bytes": "14029"
},
{
"name": "Jupyter Notebook",
"bytes": "4875399"
},
{
"name": "Mako",
"bytes": "2060"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "874414"
},
{
"name": "R",
"bytes": "454"
},
{
"name": "Shell",
"bytes": "3984"
}
],
"symlink_target": ""
} |
import logging
from connections.shift_reg_buffered import ShiftRegBuffered
from connections.shift_reg_gpio import ShiftRegGPIO
def get_connection_by_config(config) -> object:
    """Build a connection from a config dict; None for unknown con_type."""
    if config["con_type"] != "shiftreg":
        logging.warning("Unknown connection: {0}".format(config))
        return None
    # Buffered wrapper around the raw GPIO-backed shift register.
    return ShiftRegBuffered(
        ShiftRegGPIO(**config["con_params"])
    )
| {
"content_hash": "d0be8fdc9c12e205b9689115891c1008",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 25.61111111111111,
"alnum_prop": 0.6529284164859002,
"repo_name": "s-kostyuk/smart_home_project",
"id": "8046faa09f0642cd292d8e21e9b1452ac5db7e15",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/connections/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "7333"
},
{
"name": "Python",
"bytes": "28349"
}
],
"symlink_target": ""
} |
import paho.mqtt.publish as publish
# Fire-and-forget: publish a single 'read_presence' command on the "sensor"
# topic to the MQTT broker at 172.20.10.7.
publish.single("sensor", 'read_presence', hostname="172.20.10.7")
| {
"content_hash": "9ac88a2264f012dadc32c7151eb0721d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 65,
"avg_line_length": 51,
"alnum_prop": 0.7549019607843137,
"repo_name": "ragulbalaji/Spaces",
"id": "58e27c475cb7e52f72de90f33b72da14345e1379",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Publish/publish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5272"
},
{
"name": "CSS",
"bytes": "1169"
},
{
"name": "HTML",
"bytes": "3815"
},
{
"name": "JavaScript",
"bytes": "42246"
},
{
"name": "Python",
"bytes": "5691"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 489cf746b500
Revises: None
Create Date: 2015-11-20 17:18:29.015000
"""
# revision identifiers, used by Alembic.
revision = '489cf746b500'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: create the ``users`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('name', sa.String(), nullable=False),
                    sa.Column('email', sa.String(), nullable=False),
                    sa.Column('password', sa.String(), nullable=True),
                    sa.Column('phone_number', sa.String(), nullable=False),
                    sa.Column('country_code', sa.String(), nullable=False),
                    sa.Column('phone_number_confirmed', sa.Boolean(), nullable=False),
                    sa.Column('authy_user_id', sa.String(), nullable=True),
                    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``users`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('users')
    ### end Alembic commands ###
| {
"content_hash": "d19785ccfdfa1d66dc98b6d8e970bdcc",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 28.555555555555557,
"alnum_prop": 0.6673151750972762,
"repo_name": "TwilioDevEd/account-verification-flask",
"id": "fac891fe0a1512e87d9a464ba8c7dcb0da54a538",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/489cf746b500_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1461"
},
{
"name": "HTML",
"bytes": "7019"
},
{
"name": "JavaScript",
"bytes": "10918"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "21590"
}
],
"symlink_target": ""
} |
import sys, os, io
# Fail fast on interpreters the package does not support.
if sys.version_info < (2, 7):
    raise ImportError("bounter requires python >= 2.7")
# TODO add ez_setup?
from setuptools import setup, find_packages, Extension
def read(fname):
    '''Return UTF-8 contents of fname, resolved next to this file ('' if absent).'''
    path = os.path.join(os.path.dirname(__file__), fname)
    if os.path.isfile(path):
        with io.open(path, encoding='utf-8') as handle:
            return handle.read()
    # Missing files (e.g. README not shipped) yield an empty description.
    return ''
# Package metadata and build configuration for the bounter distribution.
setup(
    name='bounter',
    version='1.2.0',
    description='Counter for large datasets',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    headers=['cbounter/hll.h', 'cbounter/murmur3.h'],
    # C extensions: count-min sketch and hash-table counters, both sharing
    # the murmur3 hash and HyperLogLog implementations.
    ext_modules=[
        Extension('bounter_cmsc', ['cbounter/cms_cmodule.c', 'cbounter/murmur3.c', 'cbounter/hll.c']),
        Extension('bounter_htc', ['cbounter/ht_cmodule.c', 'cbounter/murmur3.c', 'cbounter/hll.c'])
    ],
    packages=find_packages(),
    author=u'Filip Stefanak',
    author_email='f.stefanak@rare-technologies.com',
    maintainer=u'RARE Technologies',
    maintainer_email='opensource@rare-technologies.com',
    url='https://github.com/RaRe-Technologies/bounter',
    download_url='http://pypi.python.org/pypi/bounter',
    keywords='counter, count-min sketch, bounded memory, hyperloglog, approximative counting, cardinality estimation',
    license='MIT',
    platforms='any',
    test_suite="bounter.tests",
    classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
| {
"content_hash": "57a3a505448fc253fa2cb39cbd03f0ec",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 118,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.646,
"repo_name": "RaRe-Technologies/bounter",
"id": "2dbd7e87a39da52e829d40354880b512638bf1d6",
"size": "2238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "71557"
},
{
"name": "Python",
"bytes": "75229"
},
{
"name": "Shell",
"bytes": "1318"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from reversion.admin import VersionAdmin
from stagecraft.apps.datasets.models.data_group import DataGroup
from stagecraft.apps.datasets.models.data_set import DataSet
class DataSetInline(admin.StackedInline):
    """Read-only inline listing a group's data sets on the DataGroup page."""
    model = DataSet
    fields = ('name', 'data_type',)
    # Same fields marked read-only: data sets are displayed, never edited here.
    readonly_fields = ('name', 'data_type',)
    # No blank "add" forms.
    extra = 0
    def has_delete_permission(self, request, obj=None):
        # Data sets cannot be deleted from the data-group admin page.
        return False
class DataGroupAdmin(VersionAdmin):
    """Version-tracked admin for DataGroup, with its data sets shown inline."""
    search_fields = ['name']
    list_display = ('name',)
    inlines = [
        DataSetInline
    ]
# Register the model so it appears in the Django admin site.
admin.site.register(DataGroup, DataGroupAdmin)
| {
"content_hash": "41e9ea17241af4894567cb7b6d1c4acb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 64,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.7095808383233533,
"repo_name": "alphagov/stagecraft",
"id": "3518c57567b3bae590d96ad450bb0332f896fb5e",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stagecraft/apps/datasets/admin/data_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "855"
},
{
"name": "JavaScript",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "622720"
},
{
"name": "Shell",
"bytes": "14467"
}
],
"symlink_target": ""
} |
import logging
from django.http import HttpResponse
from twilio.twiml.messaging_response import MessagingResponse
from apostello.reply import InboundSms
from apostello.twilio import twilio_view
from site_config.models import SiteConfiguration
logger = logging.getLogger("apostello")
@twilio_view
def sms(request):
    """
    Handle all incoming messages from Twilio.

    This is the start of the message processing pipeline.
    """
    logger.info("Received new sms")
    twiml = MessagingResponse()

    inbound = InboundSms(request.POST)
    inbound.start_bg_tasks()

    site_config = SiteConfiguration.get_solo()
    if inbound.reply and not site_config.disable_all_replies:
        logger.info("Add reply (%s) to response", inbound.reply)
        twiml.message(inbound.reply)

    logger.info("Return response to Twilio")
    return HttpResponse(str(twiml), content_type="application/xml")
| {
"content_hash": "c1a421067664f5d22cf72020111ab45c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 27.741935483870968,
"alnum_prop": 0.7290697674418605,
"repo_name": "monty5811/apostello",
"id": "57813bc5fdeab67cff99c09ef965b40139d28378",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apostello/views/sms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18413"
},
{
"name": "Elm",
"bytes": "484874"
},
{
"name": "HTML",
"bytes": "21141"
},
{
"name": "JavaScript",
"bytes": "31346"
},
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "372217"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def variable_summaries(var):
    """Attach TensorBoard summaries (mean/stddev/max/min/histogram) to var."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def conv2d(x, W):
    # Stride-1, SAME-padded 2-D convolution (output spatial size == input).
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    # 2x2 max pooling with stride 2: halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
    """Create a trainable weight tensor from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def layer(input_tensor, input_dim, output_dim, layer_name, activation_fn=tf.nn.relu):
    """Fully-connected layer: activation_fn(input_tensor @ W + b).

    Weights, biases and pre/post-activation tensors are logged to
    TensorBoard under `layer_name`.  Pass activation_fn=None to obtain the
    raw pre-activation output (e.g. for a softmax readout layer).
    """
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights)
        with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases)
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.summary.histogram('pre_activations', preactivate)
        if activation_fn is None:
            return preactivate
        activations = activation_fn(preactivate, name='activation')
        tf.summary.histogram('activations', activations)
        return activations
def conv_layer(input_tensor, input_channels, output_channels, layer_name, patch_shape=(5, 5), activation_fn=tf.nn.relu):
    """Convolution + activation + 2x2 max-pool block with TensorBoard logging.

    Applies a patch_shape convolution mapping input_channels to
    output_channels, then activation_fn, then max_pool_2x2 (which halves
    the spatial dimensions); all intermediates are summarized.
    """
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            W_conv = weight_variable([patch_shape[0], patch_shape[1], input_channels, output_channels])
            variable_summaries(W_conv)
        with tf.name_scope('biases'):
            b_conv = bias_variable([output_channels])
            variable_summaries(b_conv)
        with tf.name_scope('conv2d_plus_b'):
            preactivate = conv2d(input_tensor, W_conv) + b_conv
            tf.summary.histogram('pre_activations', preactivate)
        h_conv = activation_fn(preactivate, name='activation')
        tf.summary.histogram('h_conv', h_conv)
        h_pool = max_pool_2x2(h_conv)
        tf.summary.histogram('h_pool', h_pool)
        return h_pool
def main():
    """Train a two-conv-layer MNIST classifier, logging to TensorBoard."""
    # Hyperparameters.
    input_dim = 784
    output_dim = 10
    num_filters1 = 32
    num_filters2 = 64
    keep_probability = 0.5
    learning_rate = 1e-4
    num_epochs = 1000

    # Inputs: flattened 28x28 images and one-hot labels.
    x = tf.placeholder(tf.float32, shape=[None, input_dim])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    y_ = tf.placeholder(tf.float32, shape=[None, output_dim])

    # Two conv+pool stages: 28x28 -> 14x14 -> 7x7 feature maps, then a
    # 1024-unit fully-connected layer.
    h_pool1 = conv_layer(x_image, 1, num_filters1, 'conv_layer1', patch_shape=(5, 5), activation_fn=tf.nn.relu)
    h_pool2 = conv_layer(h_pool1, num_filters1, num_filters2, 'conv_layer2', patch_shape=(5, 5), activation_fn=tf.nn.relu)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*num_filters2])
    h_fc1 = layer(h_pool2_flat, 7*7*num_filters2, 1024, 'fc_layer1', activation_fn=tf.nn.relu)

    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        tf.summary.scalar('dropout_keep_probability', keep_prob)
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)

    # Readout layer emits raw logits (no activation).
    y_conv = layer(h_fc1_dropout, 1024, output_dim, 'readout_layer', activation_fn=None)

    with tf.name_scope('cross_entropy'):
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        tf.summary.scalar('cross_entropy', cross_entropy)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    merged = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter('../log/mnist/train', sess.graph)
        test_writer = tf.summary.FileWriter('../log/mnist/test')
        for i in range(num_epochs):
            if i % 10 == 0:
                # Every 10th step: evaluate on the full test set (no dropout).
                summary, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
                test_writer.add_summary(summary, i)
                # Fix: call print() so the script also runs under Python 3
                # (was a Python-2-only print statement).
                print('Accuracy at step %s: %s' % (i, acc))
            else:
                xs, ys = mnist.train.next_batch(100)
                summary, _ = sess.run([merged, train_step], feed_dict={x: xs, y_: ys, keep_prob: keep_probability})
                train_writer.add_summary(summary, i)
# Build the graph and train when run as a script.
if __name__=='__main__':
    main(); | {
"content_hash": "e51a832b4c43a634aac081039a31e051",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 120,
"avg_line_length": 33.635714285714286,
"alnum_prop": 0.6956891059672967,
"repo_name": "nmallinar/gans",
"id": "d3335c430c8ec2542553d7d084082bccf40f8505",
"size": "4709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/minst_tutorial_conv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14762"
}
],
"symlink_target": ""
} |
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import re
from string import Template
import traceback
import random
import time
import beets
from beets.plugins import BeetsPlugin
import beets.ui
from beets import logging
from beets import vfs
from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from beets.mediafile import MediaFile
# Wire-protocol constants for the MPD-compatible server.
PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
# Command-list framing keywords (clients batch commands between these).
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
# Response line prefixes.
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
NEWLINE = u"\n"
# Numeric error codes used in ACK responses.
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
# Volume bounds.
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
    # Commands that are available when unauthenticated.
    u'close', u'commands', u'notcommands', u'password', u'ping',
)
# Item fields that exist both on beets Items and as writable media tags.
ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys())
# Loggers.
log = logging.getLogger('beets.bpd')
global_log = logging.getLogger('beets')
# Gstreamer import error.
class NoGstreamerError(Exception):
    """Raised when the Gstreamer bindings cannot be imported."""
    pass
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
    """An error that should be exposed to the client to the BPD
    server.
    """
    # ACK line layout shared by all BPD errors (and their static subclasses).
    template = Template(u'$resp [$code@$index] {$cmd_name} $message')

    def __init__(self, code, message, cmd_name='', index=0):
        self.code, self.message = code, message
        self.cmd_name, self.index = cmd_name, index

    def response(self):
        """Returns a string to be used as the response code for the
        erring command.
        """
        fields = {
            'resp': RESP_ERR,
            'code': self.code,
            'index': self.index,
            'cmd_name': self.cmd_name,
            'message': self.message,
        }
        return self.template.substitute(fields)
def make_bpd_error(s_code, s_message):
    """Create a BPDError subclass for a static code and message.
    """
    # The subclass carries its code/message as class attributes and takes
    # no constructor arguments.
    body = {
        'code': s_code,
        'message': s_message,
        'cmd_name': '',
        'index': 0,
        '__init__': lambda self: None,
    }
    return type(str('NewBPDError'), (BPDError,), body)
# Frequently-used argument errors, pre-bound to their MPD error codes.
ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument')
ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range')
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found')
def cast_arg(t, val):
    """Attempts to call t on val, raising a ArgumentTypeError
    on ValueError.

    If 't' is the special string 'intbool', attempts to cast first
    to an int and then to a bool (i.e., 1=True, 0=False).
    """
    if t == 'intbool':
        # Two-step cast: text -> int -> bool.
        return cast_arg(bool, cast_arg(int, val))
    try:
        return t(val)
    except ValueError:
        raise ArgumentTypeError()
# Control-flow exception: not an error, just a request to hang up.
class BPDClose(Exception):
    """Raised by a command invocation to indicate that the connection
    should be closed.
    """
# Generic server infrastructure, implementing the basic protocol.
class BaseServer(object):
"""A MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
"""
self.host, self.port, self.password = host, port, password
# Default server values.
self.random = False
self.repeat = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Object for random numbers generation
self.random_obj = random.Random()
    def run(self):
        """Block and start listening for connections from clients. An
        interrupt (^C) closes the server.
        """
        # Record the serving start time (presumably for uptime-style
        # statistics elsewhere -- set before the loop begins).
        self.startup_time = time.time()
        # bluelet drives the asynchronous socket loop; each accepted client
        # is handed to a Connection handler coroutine.
        bluelet.run(bluelet.server(self.host, self.port,
                                   Connection.handler(self)))
def _item_info(self, item):
"""An abstract method that should response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item.
"""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_kill(self, conn):
"""Exits the server process."""
exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDClose()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, 'incorrect password')
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith('cmd_'):
yield u'command: ' + func[4:]
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith('cmd_'):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
u'volume: ' + unicode(self.volume),
u'repeat: ' + unicode(int(self.repeat)),
u'random: ' + unicode(int(self.random)),
u'playlist: ' + unicode(self.playlist_version),
u'playlistlength: ' + unicode(len(self.playlist)),
u'xfade: ' + unicode(self.crossfade),
)
if self.current_index == -1:
state = u'stop'
elif self.paused:
state = u'pause'
else:
state = u'play'
yield u'state: ' + state
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + unicode(self.current_index)
yield u'songid: ' + unicode(current_id)
if self.error:
yield u'error: ' + self.error
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg('intbool', state)
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg('intbool', state)
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, u'volume out of range')
self.volume = vol
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
crossfade = cast_arg(int, crossfade)
if crossfade < 0:
raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative')
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
del(self.playlist[index])
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
self.playlist_version += 1
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=-1):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
index = cast_arg(int, index)
if index == -1:
for track in self.playlist:
yield self._item_info(track)
else:
try:
track = self.playlist[index]
except IndexError:
raise ArgumentIndexError()
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=-1):
return self.cmd_playlistinfo(conn, self._id_to_index(track_id))
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield u'cpos: ' + unicode(idx)
yield u'Id: ' + unicode(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
"""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
self.current_index = self._succ_idx()
if self.current_index >= len(self.playlist):
# Fallen off the end. Just move to stopped state.
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
self.current_index = self._prev_idx()
if self.current_index < 0:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg('intbool', state)
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index > len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
def cmd_profile(self, conn):
"""Memory profiling for debugging."""
from guppy import hpy
heap = hpy().heap()
print(heap)
class Connection(object):
    """A connection between a client and the server. Handles input and
    output from and to the client.
    """
    def __init__(self, server, sock):
        """Create a new connection for the accepted socket `sock`,
        serving commands against `server`. Connections start out
        unauthenticated.
        """
        self.server = server
        self.sock = sock
        self.authenticated = False
    def send(self, lines):
        """Send lines, which is either a single string or an
        iterable consisting of strings, to the client. A newline is
        added after every string. Returns a Bluelet event that sends
        the data.
        """
        # Python 2: basestring covers both str and unicode.
        if isinstance(lines, basestring):
            lines = [lines]
        out = NEWLINE.join(lines) + NEWLINE
        log.debug(out[:-1])  # Don't log trailing newline.
        if isinstance(out, unicode):
            out = out.encode('utf8')
        return self.sock.sendall(out)
    def do_command(self, command):
        """A coroutine that runs the given command and sends an
        appropriate response."""
        try:
            yield bluelet.call(command.run(self))
        except BPDError as e:
            # Send the error.
            yield self.send(e.response())
        else:
            # Send success code.
            yield self.send(RESP_OK)
    def run(self):
        """Send a greeting to the client and begin processing commands
        as they arrive.
        """
        yield self.send(HELLO)
        clist = None  # Initially, no command list is being constructed.
        while True:
            line = yield self.sock.readline()
            if not line:
                break
            line = line.strip()
            if not line:
                break
            log.debug(line)
            if clist is not None:
                # Command list already opened.
                if line == CLIST_END:
                    yield bluelet.call(self.do_command(clist))
                    clist = None  # Clear the command list.
                else:
                    clist.append(Command(line))
            elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
                # Begin a command list.
                clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
            else:
                # Ordinary command.
                try:
                    yield bluelet.call(self.do_command(Command(line)))
                except BPDClose:
                    # Command indicates that the conn should close.
                    self.sock.close()
                    return
    @classmethod
    def handler(cls, server):
        """Return a bluelet-compatible callback that wraps each
        accepted socket in a `Connection` bound to `server`.
        """
        def _handle(sock):
            """Creates a new `Connection` and runs it.
            """
            return cls(server, sock).run()
        return _handle
class Command(object):
    """A command issued by the client for processing by the server.
    """
    # Byte-string patterns: the command name, then optionally quoted or
    # bare whitespace-separated arguments.
    command_re = re.compile(br'^([^ \t]+)[ \t]*')
    arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)')
    def __init__(self, s):
        """Creates a new `Command` from the given string, `s`, parsing
        the string for command name and arguments.
        """
        command_match = self.command_re.match(s)
        self.name = command_match.group(1)
        self.args = []
        arg_matches = self.arg_re.findall(s[command_match.end():])
        for match in arg_matches:
            if match[0]:
                # Quoted argument.
                arg = match[0]
                # Unescape \" and \\ inside quoted arguments.
                arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\')
            else:
                # Unquoted argument.
                arg = match[1]
            arg = arg.decode('utf8')
            self.args.append(arg)
    def run(self, conn):
        """A coroutine that executes the command on the given
        connection.
        """
        # Attempt to get correct command function.
        # NOTE: under Python 2 `self.name` is a byte string (str), so
        # concatenating with the 'cmd_' literal works directly.
        func_name = 'cmd_' + self.name
        if not hasattr(conn.server, func_name):
            raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name)
        func = getattr(conn.server, func_name)
        # Ensure we have permission for this command.
        if conn.server.password and \
                not conn.authenticated and \
                self.name not in SAFE_COMMANDS:
            raise BPDError(ERROR_PERMISSION, u'insufficient privileges')
        try:
            args = [conn] + self.args
            results = func(*args)
            if results:
                for data in results:
                    yield conn.send(data)
        except BPDError as e:
            # An exposed error. Set the command name and then let
            # the Connection handle it.
            e.cmd_name = self.name
            raise e
        except BPDClose:
            # An indication that the connection should close. Send
            # it on the Connection.
            raise
        except Exception as e:
            # An "unintentional" error. Hide it from the client.
            log.error(traceback.format_exc(e))
            raise BPDError(ERROR_SYSTEM, u'server error', self.name)
class CommandList(list):
    """An ordered batch of `Command` objects submitted by a client
    between the list-begin and list-end protocol markers.
    If `verbose`, the server acknowledges each completed command with a
    delimiter line; otherwise only a final status is reported.
    """
    def __init__(self, sequence=None, verbose=False):
        """Build the list from an optional iterable of `Command`s and
        record whether this is a verbose ("OK") command list.
        """
        if sequence:
            self.extend(sequence)
        self.verbose = verbose
    def run(self, conn):
        """Coroutine executing every queued command in order on `conn`,
        aborting at the first failure.
        """
        for position, command in enumerate(self):
            try:
                yield bluelet.call(command.run(conn))
            except BPDError as e:
                # Stop executing and report which entry failed.
                e.index = position
                raise e
            # In a verbose ("OK") command list, delimit each command's
            # output before moving on.
            if self.verbose:
                yield conn.send(RESP_CLIST_VERBOSE)
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
    """An MPD-compatible server using GStreamer to play audio and beets
    to store its library.
    """
    def __init__(self, library, host, port, password):
        """Create a server for the beets `library`, raising
        NoGstreamerError when the GStreamer bindings are unavailable.
        """
        try:
            from beetsplug.bpd import gstplayer
        except ImportError as e:
            # This is a little hacky, but it's the best I know for now.
            if e.args[0].endswith(' gst'):
                raise NoGstreamerError()
            else:
                raise
        super(Server, self).__init__(host, port, password)
        self.lib = library
        self.player = gstplayer.GstPlayer(self.play_finished)
        self.cmd_update(None)
    def run(self):
        """Start the GStreamer player, then the protocol event loop."""
        self.player.run()
        super(Server, self).run()
    def play_finished(self):
        """A callback invoked every time our player finishes a
        track.
        """
        self.cmd_next(None)
    # Metadata helper functions.
    def _item_info(self, item):
        """Return the MPD metadata response lines for a beets item."""
        info_lines = [
            u'file: ' + item.destination(fragment=True),
            u'Time: ' + unicode(int(item.length)),
            u'Title: ' + item.title,
            u'Artist: ' + item.artist,
            u'Album: ' + item.album,
            u'Genre: ' + item.genre,
        ]
        track = unicode(item.track)
        if item.tracktotal:
            track += u'/' + unicode(item.tracktotal)
        info_lines.append(u'Track: ' + track)
        info_lines.append(u'Date: ' + unicode(item.year))
        try:
            pos = self._id_to_index(item.id)
            info_lines.append(u'Pos: ' + unicode(pos))
        except ArgumentNotFoundError:
            # Don't include position if not in playlist.
            pass
        info_lines.append(u'Id: ' + unicode(item.id))
        return info_lines
    def _item_id(self, item):
        """The beets database id doubles as the MPD track id."""
        return item.id
    # Database updating.
    def cmd_update(self, conn, path=u'/'):
        """Updates the catalog to reflect the current database state.
        """
        # Path is ignored. Also, the real MPD does this asynchronously;
        # this is done inline.
        print('Building directory tree...')
        self.tree = vfs.libtree(self.lib)
        print('... done.')
        self.updated_time = time.time()
    # Path (directory tree) browsing.
    def _resolve_path(self, path):
        """Returns a VFS node or an item ID located at the path given.
        If the path does not exist, raises an ArgumentNotFoundError.
        """
        components = path.split(u'/')
        node = self.tree
        for component in components:
            if not component:
                continue
            if isinstance(node, int):
                # We're trying to descend into a file node.
                raise ArgumentNotFoundError()
            if component in node.files:
                node = node.files[component]
            elif component in node.dirs:
                node = node.dirs[component]
            else:
                raise ArgumentNotFoundError()
        return node
    def _path_join(self, p1, p2):
        """Smashes together two BPD paths."""
        out = p1 + u'/' + p2
        return out.replace(u'//', u'/').replace(u'//', u'/')
    def cmd_lsinfo(self, conn, path=u"/"):
        """Sends info on all the items in the path."""
        node = self._resolve_path(path)
        if isinstance(node, int):
            # Trying to list a track.
            raise BPDError(ERROR_ARG, 'this is not a directory')
        else:
            # NOTE(review): .items() here but .iteritems() below --
            # both work on Python 2; harmonize when porting.
            for name, itemid in iter(sorted(node.files.items())):
                item = self.lib.get_item(itemid)
                yield self._item_info(item)
            for name, _ in iter(sorted(node.dirs.iteritems())):
                dirpath = self._path_join(path, name)
                if dirpath.startswith(u"/"):
                    # Strip leading slash (libmpc rejects this).
                    dirpath = dirpath[1:]
                yield u'directory: %s' % dirpath
    def _listall(self, basepath, node, info=False):
        """Helper function for recursive listing. If info, show
        tracks' complete info; otherwise, just show items' paths.
        """
        if isinstance(node, int):
            # List a single file.
            if info:
                item = self.lib.get_item(node)
                yield self._item_info(item)
            else:
                yield u'file: ' + basepath
        else:
            # List a directory. Recurse into both directories and files.
            for name, itemid in sorted(node.files.iteritems()):
                newpath = self._path_join(basepath, name)
                # "yield from"
                for v in self._listall(newpath, itemid, info):
                    yield v
            for name, subdir in sorted(node.dirs.iteritems()):
                newpath = self._path_join(basepath, name)
                yield u'directory: ' + newpath
                for v in self._listall(newpath, subdir, info):
                    yield v
    def cmd_listall(self, conn, path=u"/"):
        """Send the paths all items in the directory, recursively."""
        return self._listall(path, self._resolve_path(path), False)
    def cmd_listallinfo(self, conn, path=u"/"):
        """Send info on all the items in the directory, recursively."""
        return self._listall(path, self._resolve_path(path), True)
    # Playlist manipulation.
    def _all_items(self, node):
        """Generator yielding all items under a VFS node.
        """
        if isinstance(node, int):
            # Could be more efficient if we built up all the IDs and
            # then issued a single SELECT.
            yield self.lib.get_item(node)
        else:
            # Recurse into a directory.
            for name, itemid in sorted(node.files.iteritems()):
                # "yield from"
                for v in self._all_items(itemid):
                    yield v
            for name, subdir in sorted(node.dirs.iteritems()):
                for v in self._all_items(subdir):
                    yield v
    def _add(self, path, send_id=False):
        """Adds a track or directory to the playlist, specified by the
        path. If `send_id`, write each item's id to the client.
        """
        for item in self._all_items(self._resolve_path(path)):
            self.playlist.append(item)
            if send_id:
                yield u'Id: ' + unicode(item.id)
        self.playlist_version += 1
    def cmd_add(self, conn, path):
        """Adds a track or directory to the playlist, specified by a
        path.
        """
        return self._add(path, False)
    def cmd_addid(self, conn, path):
        """Same as `cmd_add` but sends an id back to the client."""
        return self._add(path, True)
    # Server info.
    def cmd_status(self, conn):
        """Extend the base status with bitrate and elapsed time."""
        for line in super(Server, self).cmd_status(conn):
            yield line
        if self.current_index > -1:
            item = self.playlist[self.current_index]
            # NOTE: Python 2 integer division (bps -> kbps).
            yield u'bitrate: ' + unicode(item.bitrate / 1000)
            # Missing 'audio'.
            (pos, total) = self.player.time()
            yield u'time: ' + unicode(pos) + u':' + unicode(total)
        # Also missing 'updating_db'.
    def cmd_stats(self, conn):
        """Sends some statistics about the library."""
        with self.lib.transaction() as tx:
            statement = 'SELECT COUNT(DISTINCT artist), ' \
                        'COUNT(DISTINCT album), ' \
                        'COUNT(id), ' \
                        'SUM(length) ' \
                        'FROM items'
            artists, albums, songs, totaltime = tx.query(statement)[0]
        yield (
            u'artists: ' + unicode(artists),
            u'albums: ' + unicode(albums),
            u'songs: ' + unicode(songs),
            u'uptime: ' + unicode(int(time.time() - self.startup_time)),
            u'playtime: ' + u'0',  # Missing.
            u'db_playtime: ' + unicode(int(totaltime)),
            u'db_update: ' + unicode(int(self.updated_time)),
        )
    # Searching.
    # Maps MPD tag names (canonical capitalization) to beets item fields.
    tagtype_map = {
        u'Artist': u'artist',
        u'Album': u'album',
        u'Title': u'title',
        u'Track': u'track',
        u'AlbumArtist': u'albumartist',
        u'AlbumArtistSort': u'albumartist_sort',
        # Name?
        u'Genre': u'genre',
        u'Date': u'year',
        u'Composer': u'composer',
        # Performer?
        u'Disc': u'disc',
        u'filename': u'path',  # Suspect.
    }
    def cmd_tagtypes(self, conn):
        """Returns a list of the metadata (tag) fields available for
        searching.
        """
        for tag in self.tagtype_map:
            yield u'tagtype: ' + tag
    def _tagtype_lookup(self, tag):
        """Uses `tagtype_map` to look up the beets column name for an
        MPD tagtype (or throw an appropriate exception). Returns both
        the canonical name of the MPD tagtype and the beets column
        name.
        """
        for test_tag, key in self.tagtype_map.items():
            # Match case-insensitively.
            if test_tag.lower() == tag.lower():
                return test_tag, key
        raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
    def _metadata_query(self, query_type, any_query_type, kv):
        """Helper function returns a query object that will find items
        according to the library query type provided and the key-value
        pairs specified. The any_query_type is used for queries of
        type "any"; if None, then an error is thrown.
        """
        if kv:  # At least one key-value pair.
            queries = []
            # Iterate pairwise over the arguments.
            it = iter(kv)
            for tag, value in zip(it, it):
                if tag.lower() == u'any':
                    if any_query_type:
                        queries.append(any_query_type(value,
                                                      ITEM_KEYS_WRITABLE,
                                                      query_type))
                    else:
                        raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
                else:
                    _, key = self._tagtype_lookup(tag)
                    queries.append(query_type(key, value))
            return dbcore.query.AndQuery(queries)
        else:  # No key-value pairs.
            return dbcore.query.TrueQuery()
    def cmd_search(self, conn, *kv):
        """Perform a substring match for items."""
        query = self._metadata_query(dbcore.query.SubstringQuery,
                                     dbcore.query.AnyFieldQuery,
                                     kv)
        for item in self.lib.items(query):
            yield self._item_info(item)
    def cmd_find(self, conn, *kv):
        """Perform an exact match for items."""
        query = self._metadata_query(dbcore.query.MatchQuery,
                                     None,
                                     kv)
        for item in self.lib.items(query):
            yield self._item_info(item)
    def cmd_list(self, conn, show_tag, *kv):
        """List distinct metadata values for show_tag, possibly
        filtered by matching match_tag to match_term.
        """
        show_tag_canon, show_key = self._tagtype_lookup(show_tag)
        query = self._metadata_query(dbcore.query.MatchQuery, None, kv)
        clause, subvals = query.clause()
        # show_key comes from the trusted tagtype_map (not raw client
        # input), so interpolating it into SQL here is bounded; values
        # go through `subvals` parameters.
        statement = 'SELECT DISTINCT ' + show_key + \
                    ' FROM items WHERE ' + clause + \
                    ' ORDER BY ' + show_key
        with self.lib.transaction() as tx:
            rows = tx.query(statement, subvals)
        for row in rows:
            yield show_tag_canon + u': ' + unicode(row[0])
    def cmd_count(self, conn, tag, value):
        """Returns the number and total time of songs matching the
        tag/value query.
        """
        _, key = self._tagtype_lookup(tag)
        songs = 0
        playtime = 0.0
        for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
            songs += 1
            playtime += item.length
        yield u'songs: ' + unicode(songs)
        yield u'playtime: ' + unicode(int(playtime))
    # "Outputs." Just a dummy implementation because we don't control
    # any outputs.
    def cmd_outputs(self, conn):
        """List the available outputs."""
        yield (
            u'outputid: 0',
            u'outputname: gstreamer',
            u'outputenabled: 1',
        )
    def cmd_enableoutput(self, conn, output_id):
        """Accept enabling the single (already-enabled) output."""
        output_id = cast_arg(int, output_id)
        if output_id != 0:
            raise ArgumentIndexError()
    def cmd_disableoutput(self, conn, output_id):
        """Reject disabling the only output."""
        output_id = cast_arg(int, output_id)
        if output_id == 0:
            raise BPDError(ERROR_ARG, u'cannot disable this output')
        else:
            raise ArgumentIndexError()
    # Playback control. The functions below hook into the
    # half-implementations provided by the base class. Together, they're
    # enough to implement all normal playback functionality.
    def cmd_play(self, conn, index=-1):
        """Begin or resume playback, driving the GStreamer player."""
        new_index = index != -1 and index != self.current_index
        was_paused = self.paused
        super(Server, self).cmd_play(conn, index)
        if self.current_index > -1:  # Not stopped.
            if was_paused and not new_index:
                # Just unpause.
                self.player.play()
            else:
                self.player.play_file(self.playlist[self.current_index].path)
    def cmd_pause(self, conn, state=None):
        """Toggle or set pause and propagate it to the player."""
        super(Server, self).cmd_pause(conn, state)
        if self.paused:
            self.player.pause()
        elif self.player.playing:
            self.player.play()
    def cmd_stop(self, conn):
        """Stop playback and the underlying player."""
        super(Server, self).cmd_stop(conn)
        self.player.stop()
    def cmd_seek(self, conn, index, pos):
        """Seeks to the specified position in the specified song."""
        index = cast_arg(int, index)
        pos = cast_arg(int, pos)
        super(Server, self).cmd_seek(conn, index, pos)
        self.player.seek(pos)
    # Volume control.
    def cmd_setvol(self, conn, vol):
        """Set the volume and propagate it (as 0.0-1.0) to the player."""
        vol = cast_arg(int, vol)
        super(Server, self).cmd_setvol(conn, vol)
        self.player.volume = float(vol) / 100
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
    """Provides the "beet bpd" command for running a music player
    server.
    """
    def __init__(self):
        super(BPDPlugin, self).__init__()
        # Default configuration: listen on all interfaces, standard MPD
        # port, no password, full volume.
        self.config.add({
            'host': u'',
            'port': 6600,
            'password': u'',
            'volume': VOLUME_MAX,
        })
        # Keep the password out of dumped/displayed configuration.
        self.config['password'].redact = True
    def start_bpd(self, lib, host, port, password, volume, debug):
        """Starts a BPD server."""
        if debug:  # FIXME this should be managed by BeetsPlugin
            self._log.setLevel(logging.DEBUG)
        else:
            self._log.setLevel(logging.WARNING)
        try:
            server = Server(lib, host, port, password)
            server.cmd_setvol(None, volume)
            server.run()
        except NoGstreamerError:
            global_log.error(u'Gstreamer Python bindings not found.')
            global_log.error(u'Install "python-gst0.10", "py27-gst-python", '
                             u'or similar package to use BPD.')
    def commands(self):
        """Expose the `bpd` subcommand to the beets CLI."""
        cmd = beets.ui.Subcommand(
            'bpd', help='run an MPD-compatible music player server'
        )
        cmd.parser.add_option(
            '-d', '--debug', action='store_true',
            help='dump all MPD traffic to stdout'
        )
        def func(lib, opts, args):
            # Optional positional arguments override configured host/port.
            host = args.pop(0) if args else self.config['host'].get(unicode)
            port = args.pop(0) if args else self.config['port'].get(int)
            if args:
                raise beets.ui.UserError('too many arguments')
            password = self.config['password'].get(unicode)
            volume = self.config['volume'].get(int)
            debug = opts.debug or False
            self.start_bpd(lib, host, int(port), password, volume, debug)
        cmd.func = func
        return [cmd]
| {
"content_hash": "b9cd0d361bd3bf33c7b21d00c1c4e91a",
"timestamp": "",
"source": "github",
"line_count": 1179,
"max_line_length": 78,
"avg_line_length": 33.64461407972858,
"alnum_prop": 0.5589532861068395,
"repo_name": "ttsda/beets",
"id": "b998f5047c5fef434f9d2c308d9557b56e024571",
"size": "40314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "beetsplug/bpd/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1525413"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017, Arm Limited and affiliates.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os,sys
from icetea_lib.bench import Bench
class Testcase(Bench):
    """Icetea smoke test: two boards exchange a small MCPS-DATA payload
    over the nanostack MAC tester application.
    """
    def __init__(self):
        # Declarative test metadata and DUT requirements consumed by
        # the icetea framework.
        Bench.__init__(self, name = "send_data",
                       title = "Simple data transmission test",
                       status = "development",
                       type = "smoke",
                       subtype = "",
                       execution = {
                           "skip": {
                               "value": False,
                               "reason": ""
                           }
                       },
                       author = "Valtteri Erkkila",
                       purpose = "Tests that sending data works",
                       feature = ["MCPS-DATA"],
                       component = ["MAC"],
                       requirements = {
                           "duts": {
                               '*': {
                                   "count":2,
                                   "type": "hardware",
                                   "allowed_platforms": ["K64F", "K66F", "NUCLEO_F429ZI", "KW24D", "UBLOX_EVK_ODIN_W2"],
                                   "application": {
                                       "name": "TEST_APPS-device-nanostack_mac_tester"
                                   }
                               },
                               "1":{"nick": "First"},
                               "2":{"nick": "Second"}
                       }}
        )
    def setUp(self):
        """Assign distinct 64-bit MAC addresses to both DUTs."""
        self.channel = 11
        self.command("First", "addr --64-bit 01:02:03:00:00:00:00:01")
        self.command("Second", "addr --64-bit 01:02:03:00:00:00:00:02")
    def case(self):
        """Start a PAN coordinator and a device on the same channel,
        then send a 5-byte payload in each direction."""
        self.command("First", "start --pan_coordinator true --logical_channel {}".format(self.channel))
        self.command("Second", "start --pan_coordinator false --logical_channel {}".format(self.channel))
        self.command("First", "data --dst_addr 01:02:03:00:00:00:00:02 --msdu_length 5 --msdu abcde")
        self.command("Second", "data --dst_addr 01:02:03:00:00:00:00:01 --msdu_length 5 --msdu 12345")
    def tearDown(self):
        """Reset both boards so later tests start from a clean state."""
        self.reset_dut()
| {
"content_hash": "d7ec8d710e888f54c2f7b352948f7b3c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 121,
"avg_line_length": 42.06060606060606,
"alnum_prop": 0.47586455331412103,
"repo_name": "c1728p9/mbed-os",
"id": "575b00c545aed5275238b6142a663a24c419dd40",
"size": "2776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TEST_APPS/testcases/nanostack_mac_tester/send_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "10950"
},
{
"name": "Assembly",
"bytes": "7778915"
},
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "C",
"bytes": "377551945"
},
{
"name": "C++",
"bytes": "14654442"
},
{
"name": "CMake",
"bytes": "50508"
},
{
"name": "HTML",
"bytes": "1421788"
},
{
"name": "Makefile",
"bytes": "119198"
},
{
"name": "Objective-C",
"bytes": "74970"
},
{
"name": "Perl",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "1377580"
},
{
"name": "Shell",
"bytes": "88965"
},
{
"name": "XSLT",
"bytes": "8394"
}
],
"symlink_target": ""
} |
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import (StringProperty)
from kivy.uix.widget import Widget
from random import randint
import kivent_cython # import needed for kv to work!
class DebugPanel(Widget):
    """Overlay widget that samples Kivy's frames-per-second counter ten
    times a second and exposes it as display text."""
    fps = StringProperty(None)
    def __init__(self, *args, **kwargs):
        super(DebugPanel, self).__init__(*args, **kwargs)
        # Refresh the reading every tenth of a second.
        Clock.schedule_interval(self.update_fps, .1)
    def update_fps(self, dt):
        """Clock callback: store the current FPS reading as text."""
        current = Clock.get_fps()
        self.fps = str(int(current))
class TestGame(Widget):
    """Root game widget: bootstraps the KivEnt gameworld and spawns a
    field of star entities."""
    def __init__(self, *args, **kwargs):
        super(TestGame, self).__init__(*args, **kwargs)
        # Defer setup one frame so the kv-defined systems exist first.
        Clock.schedule_once(self.init_game)
    def init_game(self, dt):
        """One-shot initializer: configure states, map, and entities,
        then start the 60 FPS simulation loop."""
        self.setup_states()
        self.set_state()
        self.setup_map()
        self.load_star()
        Clock.schedule_interval(self.update, 1./60.)
    def update(self, dt):
        # Advance the game simulation by `dt` seconds.
        self.gameworld.update(dt)
    def setup_states(self):
        """Register a single 'main' state with only the quadtree
        renderer active."""
        self.gameworld.add_state(
            state_name='main',
            systems_added=['quadtree_renderer'],
            systems_removed=[],
            systems_paused=[],
            systems_unpaused=['quadtree_renderer'],
            screenmanager_screen='main'
        )
    def set_state(self):
        """Switch the gameworld into the 'main' state."""
        self.gameworld.state = 'main'
    def setup_map(self):
        """Make the kv-defined map system the current map."""
        self.gameworld.currentmap = self.gameworld.systems['map']
    def load_star(self):
        """Spawn 50 star entities at random positions on the map."""
        star_graphic = 'star.png'
        star_size = (28, 28)
        for i in range(50):
            rand_x = randint(0, self.gameworld.currentmap.map_size[0])
            rand_y = randint(0, self.gameworld.currentmap.map_size[1])
            # Component data for one entity: a position plus a rendered
            # star sprite.
            create_component_dict = {
                'position': {
                    'position': (rand_x, rand_y)
                },
                'quadtree_renderer': {
                    'texture': star_graphic, 'size': star_size
                }
            }
            component_order = ['position', 'quadtree_renderer']
            self.gameworld.init_entity(create_component_dict, component_order)
class BasicApp(App):
    """Minimal Kivy application shell; the root widget is supplied by
    the accompanying kv file rather than by build()."""
    def build(self):
        # Returning None lets Kivy fall back to the kv-defined root.
        return None
# Launch the application only when run as a script.
if __name__ == '__main__':
    BasicApp().run()
| {
"content_hash": "e198069e7ee2b40636c073df950545bd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 28.506493506493506,
"alnum_prop": 0.5699316628701595,
"repo_name": "nightmarebadger/kivy-trying-out",
"id": "cdc920de351bcd72fad2d6ce0ee0b95219728aa0",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KivEnt/tutorials/2-adding-something-to-the-screen/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22257"
}
],
"symlink_target": ""
} |
"""
Multiprocess logging management module.
"""
import sys
import logging
from queue import Empty
from atexit import register
from functools import wraps
from collections import namedtuple
from logging.handlers import QueueHandler
from multiprocessing import Queue, Process
from colorlog import ColoredFormatter
from setproctitle import setproctitle
log = logging.getLogger(__name__)
PrintRequest = namedtuple('PrintRequest', ['string', 'fd'])
def multiprocess_except_hook(exctype, value, traceback):
    """
    Unhandled exception hook that works in a multiprocess environment.

    Routes the uncaught exception through the queue-backed logging
    pipeline instead of writing directly to stderr.
    """
    exc_info = (exctype, value, traceback)
    log.critical(
        'Uncaught exception',
        exc_info=exc_info
    )
class QueueListener:
    """
    Object that will wait for messages in a queue and print or log them.

    :param multiprocessing.Queue queue: A multiprocessing queue to listen to.
    :param logging.Handler handler: A logging handler to delegate logging
     requests to.
    :param dict fds: Dictionary mapping a name with a file descriptor.
     If ``None`` is passed, a basic dictionary with stdout and stderr will be
     used.
    """

    def __init__(self, queue, handler, fds=None):
        self._queue = queue
        self._handler = handler
        if fds is None:
            fds = {
                'stdout': sys.stdout,
                'stderr': sys.stderr,
            }
        self._fds = fds

    def start(self):
        """
        Start listening on the queue.

        This method blocks until a ``None`` is submitted to the queue managed
        by this instance.
        """
        # Start listening for records
        self._run_loop(True)
        # There might still be records in the queue: drain without blocking.
        self._run_loop(False)

    def _run_loop(self, block):
        """
        Perform the listening and execution loop.

        :param bool block: Block on the queue or not.
        """
        while True:
            try:
                record = self._queue.get(block)
            except Empty:
                # A non-blocking drain is finished once the queue is empty;
                # when blocking, keep waiting for the next record.
                if not block:
                    break
                continue

            # ``None`` is the sentinel that stops the listener.
            if record is None:
                break

            # Print requests are written verbatim to the named descriptor.
            if isinstance(record, PrintRequest):
                if record.fd not in self._fds:
                    log.error(
                        'Unknown fd to print to: {}'.format(record.fd)
                    )
                    continue
                self._fds[record.fd].write(record.string)
                continue

            # Anything else is a log record: delegate to the handler.
            self._handler.handle(record)
class LoggingManager:
    """
    Logging manager class.

    This class is expected to run as a singleton. It allows to setup the
    logging subprocess and handlers for all main process and later work
    subprocesses with one single call.
    """

    # {}-style log line formats; the DEBUG variant adds the process id so
    # interleaved subprocess output can be attributed.
    FORMAT = (
        ' {log_color}{levelname:8}{reset} | '
        '{log_color}{message}{reset}'
    )
    FORMAT_DEBUG = (
        ' {log_color}{levelname:8}{reset} | '
        '{process} - {log_color}{message}{reset}'
    )

    # Verbosity (0..3) to logging level; values above 3 fall back to DEBUG
    # via LEVELS.get(..., logging.DEBUG) below.
    LEVELS = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }

    def __init__(self):
        self._verbosity = 0
        self._log_queue = None        # created lazily by setup_logging()
        self._log_subprocess = None   # Process running _logging_subprocess()

    def _logging_subprocess(self):
        """
        Logging subprocess target function.

        This function will setup the basic logging configuration and start
        the listener on the logging queue.
        """
        # Setup logging for logging subprocess
        setproctitle('flowbber - logging manager')

        # # Level
        level = self.LEVELS.get(self._verbosity, logging.DEBUG)

        # # Format
        if level != logging.DEBUG:
            format_tpl = self.FORMAT
        else:
            format_tpl = self.FORMAT_DEBUG
        formatter = ColoredFormatter(fmt=format_tpl, style='{')

        # # Handler
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        # # Configure basic logging
        logging.basicConfig(handlers=[handler], level=level)

        # Start listening for logs and prints; blocks until the None
        # sentinel is queued (see stop_logging()).
        listener = QueueListener(self._log_queue, handler)
        listener.start()

    def setup_logging(self, verbosity=0):
        """
        Setup logging for this process.

        The first time it is called it will create a subprocess to manage the
        logging and printing, setup the subprocess for stream logging to stdout
        and setup the main process to queue logging.

        In consequence, any subprocess of the main process will inherit the
        queue logging and become multiprocess logging safe.

        This method can be called from subprocesses, but if at least one
        logging has been performed it will fail as the handler will already
        exist.

        :param int verbosity: Verbosity level, as defined by
         ``LoggingManager.LEVELS``. The greater the number the more information
         is provided, with 0 as initial level.
        """
        # Perform first time setup in main process and start the logging
        # subprocess
        if self._log_queue is None:

            # Change system exception hook
            sys.excepthook = multiprocess_except_hook

            # Create logging subprocess
            self._verbosity = verbosity
            self._log_queue = Queue()
            self._log_subprocess = Process(
                target=self._logging_subprocess
            )
            self._log_subprocess.start()

            # Ensure the listener subprocess is stopped at interpreter exit.
            register(self.stop_logging)

            # Check that no call to loggers have been made
            root = logging.getLogger()
            assert not root.hasHandlers()

        # Create handler for main process and all subsequent subprocesses
        level = self.LEVELS.get(self._verbosity, logging.DEBUG)

        handler = QueueHandler(self._log_queue)
        logging.basicConfig(handlers=[handler], level=level)

    def stop_logging(self):
        """
        Stop the logging subprocess.

        This shouldn't be called unless the application is quitting.
        """
        # None is the sentinel QueueListener stops on; then wait for exit.
        self._log_queue.put_nowait(None)
        self._log_subprocess.join()

    def enqueue_print(self, obj, fd='stdout'):
        """
        Enqueue a print to the given fd.

        :param obj: Object to print.
        :param str fd: Name of the file descriptor.
         Either stdout or stderr only.
        """
        self._log_queue.put_nowait(
            PrintRequest(string=str(obj) + '\n', fd=fd)
        )
# Singleton manager shared by the module-level helper functions below.
_INSTANCE = LoggingManager()


@wraps(_INSTANCE.setup_logging)
def setup_logging(verbosity=0):
    # Module-level facade: delegates to the singleton. The docstring is
    # inherited from LoggingManager.setup_logging via @wraps.
    _INSTANCE.setup_logging(verbosity=verbosity)
@wraps(_INSTANCE.enqueue_print)
def print(string, fd='stdout'):
    # NOTE: intentionally shadows the builtin ``print`` (it is exported in
    # ``__all__``) so importing modules get multiprocess-safe printing that
    # goes through the logging subprocess queue.
    _INSTANCE.enqueue_print(string, fd=fd)
def get_logger(name):
    """
    Return a multiprocess safe logger.

    Any logger is safe once :func:`setup_logging` has installed the queue
    handler, so this simply defers to the standard logging registry.

    :param str name: Name of the logger.

    :return: A multiprocess safe logger.
    :rtype: :py:class:`logging.Logger`.
    """
    logger = logging.getLogger(name)
    return logger
__all__ = ['setup_logging', 'print', 'get_logger']
| {
"content_hash": "7913dededbf564c11da536e9c7c79ea8",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 28.06201550387597,
"alnum_prop": 0.594475138121547,
"repo_name": "kuralabs/flowbber",
"id": "2d83b3e4ad594f2b9a7d873b9a9fe83b8fd91ba8",
"size": "7850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/flowbber/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1062"
},
{
"name": "Python",
"bytes": "325671"
},
{
"name": "Shell",
"bytes": "730"
}
],
"symlink_target": ""
} |
# Scrapy project settings for the 'bigghosthead' crawler.
BOT_NAME = 'bigghosthead'

SPIDER_MODULES = ['bigghosthead.spiders']
NEWSPIDER_MODULE = 'bigghosthead.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'bigghosthead (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 2 seconds between same-site requests, to crawl politely.
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# NOTE(review): a hard-coded, logged-in session cookie is baked in here.
# It ties the crawler to one session and will expire — move it to a secret
# store or refresh it at runtime instead of committing it.
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
    'Cookie': '__jda=122270672.587570459.1489142411.1498783000.1498787748.22; __jdu=587570459; TrackID=1grRavjSXuGgVAr8-AyORkUtVbSVyp6HlNXLOCj5ZlIEkfEiGuoDVyHR0sxWdzRzCye947-T5VNHjA91gzuXYZFAiHWefBNlR15ySYISqKhU; pinId=1i_e86gMUP8GeDFmU-aO0g; unpl=V2_ZzNtbRAAEBwmAUQGexoJDWICEwkSV0RGclpDXXseWFJjAxYIclRCFXMUR1NnG10UZgoZX0VcQhdFCEdkeBpdAWQCEFRHZ3MURQtGZHMpWAxhBxRcR1NLJUUPdmQHRxddOl5CXkNXRiV2C0dQexBeBmEzE21GVEYXdQlOUXwpF2tmThZURFNFFHAMTmR6KV8%3d; __jdv=122270672|e.firefoxchina.cn|t_220520384_|tuiguang|cfb8b83c12d9400ea16b6c48165f515d|1498783000435; user-key=d07c1d7e-a6db-4ef1-b44f-576149e1f1d8; cn=0; ipLoc-djd=1-72-2799-0; __jdc=122270672; 3AB9D23F7A4B3C9B=24CHWH7HXUBR3Q5CQDRTCXDQHPEJF7KMGQUT54PGJ3VY3DOG7NCU3DKHVLTZ4JLQ4OV74P2RZF6AW5C2LXVRGX2V6M',
    'Connection': 'keep-alive',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'bigghosthead.middlewares.BigghostheadSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'bigghosthead.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# JSON-RPC web service (scrapy-jsonrpc) configuration.
WEBSERVICE_ENABLED = True
WEBSERVICE_PORT = [6080,7030]
WEBSERVICE_HOST = "127.0.0.1"
# NOTE(review): 'WEBSERVICE_RESOURCES_BASR' looks like a typo of
# 'WEBSERVICE_RESOURCES_BASE'; as spelled it is never read — confirm
# against the webservice extension before renaming.
WEBSERVICE_RESOURCES_BASR = {
    'scrapy.contrib.webservice.crawler.CrawlerResource': 1,
    'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1,
    'scrapy.contrib.webservice.stats.StatsResource': 1,
}
# NOTE(review): this assignment replaces the commented-out EXTENSIONS
# template above.
EXTENSIONS = {
    'scrapy_jsonrpc.webservice.WebService': 500,
}
JSONRPC_ENABLED = True

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'bigghosthead.pipelines.BigghostheadPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Redis-based distributed dedup/seed queue settings (currently disabled).
#REDIS_URL = 'redis://woaiqiuxiaodong.top:6379'  # used by the client ("solve") side (translated; original comment was mojibake)
#FILTER_URL = None
#FILTER_HOST = 'localhost'
#FILTER_PORT = 6379
#FILTER_DB = 0
# REDIS_QUEUE_NAME = 'OneName'
# If this is unset or set to None, the default is used: each spider gets its
# own dedup queue and seed queue. If it is set, all spiders share a single
# dedup queue and seed queue.
"""
Redis connection info for the dedup queue.

The REDIS_HOST/REDIS_PORT settings above only serve the seed queue, so the
seed queue and the dedup queue can be distributed on different machines.
"""
"content_hash": "c153d09daba1f9e896047820a98f2585",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 752,
"avg_line_length": 41.970873786407765,
"alnum_prop": 0.7904233171408744,
"repo_name": "baijiege9/BigGhostHead",
"id": "6e55b9f92b91e593d1fb2e8836be06db3525e638",
"size": "4986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/bigghosthead/bigghosthead/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "349564"
},
{
"name": "Python",
"bytes": "224645"
}
],
"symlink_target": ""
} |
"""Read JSON taken from the stack analysis DB and re-creates pom.xml from the data."""
import sys
import json
def print_header():
    """Print the header of the pom.xml manifest file."""
    header = """
<project>
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.redhat.bayessian.test</groupId>
    <artifactId>test-app-junit-dependency</artifactId>
    <version>1.0</version>
    <dependencies>"""
    print(header)
def print_footer():
    """Print the footer of the pom.xml manifest file."""
    footer = """
    </dependencies>
</project>"""
    print(footer)
def print_dependency(version, groupId, artifactId):
    """Emit one <dependency> element of the pom.xml manifest."""
    template = """
    <dependency>
        <groupId>{groupId}</groupId>
        <artifactId>{artifactId}</artifactId>
        <version>{version}</version>
    </dependency>"""
    print(template.format(version=version, groupId=groupId,
                          artifactId=artifactId))
def json2pom(input):
    """Transform the given JSON input file into the project file."""
    print_header()

    # transform all dependencies found in the source JSON file
    for dep in json.load(input):
        version = dep["version"]
        name = dep["name"]
        # Every dependency must carry both fields.
        assert version
        assert name
        groupId, artifactId = name.split(":")
        print_dependency(version, groupId, artifactId)

    print_footer()
# Only read stdin when executed as a script, so importing this module for
# reuse or testing has no side effects (the original call ran on import).
if __name__ == "__main__":
    json2pom(sys.stdin)
| {
"content_hash": "d887259b87951b17c3b5bff93122a5ca",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 86,
"avg_line_length": 26.09259259259259,
"alnum_prop": 0.6543647977288858,
"repo_name": "tisnik/fabric8-analytics-common",
"id": "c8fff46f69f4e6577e5de18fa955b37632b74e53",
"size": "1429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integration-tests/hack/json2pom.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2827"
},
{
"name": "Dockerfile",
"bytes": "833"
},
{
"name": "Gherkin",
"bytes": "571433"
},
{
"name": "HTML",
"bytes": "65002"
},
{
"name": "Python",
"bytes": "797262"
},
{
"name": "Shell",
"bytes": "30956"
}
],
"symlink_target": ""
} |
from .layer import Layer
from ..activation import Activation
import numpy as np
class RelativeInput(object):
    """Input layer holding per-unit relative variables (q, x)."""

    def __init__(self, output_size, s=1.0):
        self.output_size = output_size
        # Relative variables, drawn uniformly from [-s, s).
        self.x = np.random.uniform(-s, s, output_size)
        self.q = np.random.uniform(-s, s, output_size)

    def get_vars(self):
        return (self.q, self.x)

    def feed_forward(self, a_in):
        # Identity on the activations; also expose this layer's (q, x).
        return a_in, self.get_vars()
class Relative(object):
    """
    Fully-relative layer: unit j weights input i by q[j] * (x_in[i] - x[j]).

    :param input_size: number of inputs per sample.
    :param output_size: number of units in this layer.
    :param activation: activation name resolved through Activation.get/get_d.
    :param s: half-width of the uniform init range for q and x.
    """

    def __init__(self, input_size=0, output_size=0, activation="sigmoid", s=1.0):
        self.input_size = input_size
        self.output_size = output_size
        self.activation_name = activation.lower()
        self.activation = Activation.get(activation)
        self.d_activation = Activation.get_d(activation)

        # Bias initialization: uniform in [-c, c] with Glorot-style scale.
        c = np.sqrt(1.0 / (input_size + output_size))
        self.b = np.random.uniform(-c, c, (1, output_size))

        # Relative variable
        self.x = np.random.uniform(-s, s, output_size)
        self.q = np.random.uniform(-s, s, output_size)

    def get_vars(self):
        return self.q, self.x

    def feed_forward(self, a_in, r_in):
        return self.compute_a(self.compute_z(a_in, r_in)), self.get_vars()

    def compute_z(self, a_in, v_in):
        """
        Pre-activation z with z[:, j] = b[j] + sum_i q[j]*(x_in[i]-x[j]) * a_in[:, i].

        :param a_in: activations, shape (n_samples, input_size).
        :param v_in: (q_in, x_in) pair from the previous layer; only x_in
            (index 1) is used. (The unused read of v_in[0] in the original
            was removed.)
        """
        atrans = a_in.transpose()
        z = np.zeros((self.output_size, len(a_in)))
        v_in_x = v_in[1]
        for j in range(self.output_size):
            w_ji = self.q[j] * (v_in_x - self.x[j])
            z[j] = self.b[0][j] + w_ji.dot(atrans)
        return z.transpose()

    def compute_a(self, z):
        return self.activation(z)

    def compute_da(self, z):
        return self.d_activation(z)
| {
"content_hash": "153f4beaa268b4810716c43fc6857242",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 28.59016393442623,
"alnum_prop": 0.5825688073394495,
"repo_name": "awlange/brainsparks",
"id": "3996999578c2d87452a3211581a691b28fdde491",
"size": "1744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/calrissian/layers/relative.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1107074"
}
],
"symlink_target": ""
} |
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __generic_resolver__ import GenericResolver
class Play44Resolver(GenericResolver):
    """Resolver plugin for play44.net embedded video links."""
    name = "play44.net"
    domains = ["play44.net"]
    # Captures (host, media_id) from embed URLs.
    # NOTE(review): the unescaped '?' in 'php?' makes the preceding 'p'
    # optional rather than matching a literal '?'; it still matches the
    # intended URLs via '.*?', but '\?' was probably meant — confirm.
    pattern = '(?://|\.)(play44\.net)/embed\.php?.*?vid=([0-9a-zA-Z_\-\./]+)[\?&]*'

    def get_url(self, host, media_id):
        # Rebuild the canonical embed URL from the captured video id.
        return 'http://play44.net/embed.php?&vid=%s' % (media_id)
| {
"content_hash": "da1718f275753b09dfb90d950d2c88a6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 40.34615384615385,
"alnum_prop": 0.6892278360343184,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "f69f191509338709ed99fe44ac922675ec3d0eed",
"size": "1049",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "script.module.urlresolver/lib/urlresolver/plugins/play44_net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import rdflib
from rdflib.namespace import RDF, FOAF, RDFS, OWL, DC, DCTERMS, SKOS
from rdflib import URIRef, Literal, Namespace, XSD
import json
from mu.lib_unicode import UnicodeReader, UnicodeWriter
from mu.lib_dbpedia import DbpediaApi
import mu.mutil
from lib_ext import *
import re
import os
import hashlib
import json
import datetime
import urllib
import unicodedata
SWRC = Namespace('http://swrc.ontoware.org/ontology#')
SWC = Namespace('http://data.semanticweb.org/ns/swc/ontology#')
BIBO = Namespace('http://purl.org/ontology/bibo/')
ICAL = Namespace('http://www.w3.org/2002/12/cal/ical#')
DCTYPE = Namespace('http://purl.org/dc/dcmitype/')
VERSION_INFO = "iswc metadata 2001-2014 (2013-10-02)"
def expand_entry(entry):
    """
    Expand URI placeholders in every value of *entry*, in place.

    '[ME]' expands to entry['uri_me'] (when present) and '[WORKSHOP]' to
    the fixed workshop namespace. Values are plain strings.
    """
    # Insertion order matters: '[ME]' is applied before '[WORKSHOP]',
    # matching the original behavior.
    placeholders = {}
    if 'uri_me' in entry:
        placeholders["[ME]"] = entry['uri_me']
    placeholders["[WORKSHOP]"] = 'http://data.semanticweb.org/workshop'

    for key in list(entry):
        for placeholder, replacement in placeholders.items():
            expanded = entry[key].replace(placeholder, replacement)
            if expanded != entry[key]:
                entry[key] = expanded
class DataIswc(object):
def __init__(self, local_config, global_config, dbpedia_api={}):
    """Set up an empty RDF graph with common prefixes and per-run state."""
    self.graph = rdflib.Graph()
    # Bind readable prefixes so serialized Turtle output is compact.
    self.graph.bind("foaf", FOAF)
    self.graph.bind("dc", DC)
    self.graph.bind("owl", OWL)
    self.graph.bind("swrc", SWRC)
    self.graph.bind("swc", SWC)
    self.graph.bind("skos", SKOS)
    self.graph.bind("bibo", BIBO)
    self.graph.bind("dcterms", DCTERMS)
    self.graph.bind("ical", ICAL)
    self.graph.bind("dctype", DCTYPE)
    self.local_config = local_config
    self.global_config = global_config
    self.map_name_res = {}  # ascii localname -> URIRef (dedup cache)
    #self.map_name_name = {}
    # NOTE(review): mutable default argument {} is shared between calls;
    # harmless while only read, but an explicit dict is safer.
    self.dbpedia_api = dbpedia_api
    self.list_name_untyped = set()  # names with no dbpedia canonicalization
@staticmethod
def dbpedia_api_load(dir_data):
    """Load cached DbpediaApi name mappings from *dir_data*, keyed by namespace."""
    dbpedia_api = {}
    if os.path.exists(dir_data):
        # Only the organization mapping is active; person mapping is
        # disabled (commented) below.
        namespace = DataIswc.get_namespace(DataIswc.PREFIX_ORG)
        dbpedia_api[namespace] = DbpediaApi(dir_data, DbpediaApi.ENTITY_TYPE_ORGANIZATION)
        print "[{}] {} name mappings loaded".format(
            namespace,
            len(dbpedia_api[namespace].map_name))

        # namespace = DataIswc.get_namespace(DataIswc.PREFIX_PERSON)
        # dbpedia_api[namespace] = DbpediaApi(dir_data, DbpediaApi.ENTITY_TYPE_PERSON)
        # print "[{}] {} name mappings loaded".format(
        #     namespace,
        #     len(dbpedia_api[namespace].map_name))
    return dbpedia_api
@staticmethod
def dbpedia_api_write(dbpedia_api):
    """Persist newly discovered entities for every cached DbpediaApi."""
    #save new entities
    for api in dbpedia_api.values():
        # NOTE(review): '[]{}' prints a literal '[]' before the count;
        # '[{}]' was probably intended — confirm before changing output.
        print "[]{} name mappings ".format(len(api.map_name))
        api.write_new_data()
# def load_metadata(self):
# filename_source= "{0}/data/entity/organisation.csv".format(self.global_config["home"])
# if os.path.exists(filename_source):
# with open (filename_source) as f:
# csvreader = UnicodeReader(f)
# headers = csvreader.next()
# for row in csvreader:
# entry = dict(zip(headers, row))
# self.map_name_name[entry["altLabel"]] = {
# "prefLabel":entry["title"],
# "dbpediaUri":entry["uri"]}
# print "{0} name mappings loaded".format(len(self.map_name_name))
def run(self):
    """Full pipeline: load name maps, convert all entity types, write the complete Turtle dump."""
    # self.load_metadata()
    self.init_map_person_name()

    self.process_organization()
    self.process_person()
    self.process_proceedings()
    self.process_paper()
    self.process_event()
    self.process_misc()

    # Output path: {home}/data/www/{id}-complete.ttl
    filename_output = "{0}/data/www/{1}-complete.ttl".format(
        self.global_config["home"],
        self.local_config["id"])
    with open(filename_output, "w") as f:
        content = self.graph.serialize(format='turtle')
        f.write(content)

    print "{} name mappings without type".format(len(self.list_name_untyped))
def run_paper_x(self):
    """Reduced pipeline: papers and proceedings only, written to {id}-conf-paper.ttl."""
    self.process_paper()
    self.process_proceedings()
    #self.process_misc()

    filename_output = "{0}/data/www/{1}-conf-paper.ttl".format(
        self.global_config["home"],
        self.local_config["id"])
    with open(filename_output, "w") as f:
        content = self.graph.serialize(format='turtle')
        f.write(content)
NS_ROOT = "http://data.semanticweb.org/"
PREFIX_ORG = "organization"
PREFIX_PERSON = "person"

# Field-name -> RDF predicate mapping driving create_triple_simple():
#   "p"         every predicate to emit for the field;
#   "xsd"       emit the value as a literal of this datatype (plain Literal
#               for xsd:string); absent => emit the value as a URIRef;
#   "delimiter" documents multi-valued source fields.
# The duplicated "link_open_access"/"link_publisher" entries of the
# original were byte-identical, so collapsing them changes nothing.
PROP2URI = {
    #datatype property
    "label": {"p": [RDFS.label], "xsd": XSD.string},
    "hasAcronym": {"p": [SWC.hasAcronym], "xsd": XSD.string},
    "acronym": {"p": [SWC.hasAcronym], "xsd": XSD.string},
    "name": {"p": [RDFS.label, FOAF.name], "xsd": XSD.string},
    "title": {"p": [RDFS.label, DC.title, DCTERMS.title], "xsd": XSD.string},
    "abstract": {"p": [SWRC.abstract], "xsd": XSD.string},
    "hasAbstract": {"p": [SWRC.abstract], "xsd": XSD.string},
    "year": {"p": [SWRC.year], "xsd": XSD.string},
    "pages": {"p": [SWRC.pages], "xsd": XSD.string},
    "keywords": {"p": [SWRC.listKeyword], "xsd": XSD.string, "delimiter": ","},
    "publisher": {"p": [SWRC.publisher], "xsd": XSD.string},
    "series": {"p": [SWRC.series], "xsd": XSD.string},
    "volume": {"p": [SWRC.volume], "xsd": XSD.string},
    "subtitle": {"p": [SWRC.subtitle], "xsd": XSD.string},
    "alt-name": {"p": [SKOS.altLabel], "xsd": XSD.string, "delimiter": ","},
    "other_names": {"p": [SKOS.altLabel], "xsd": XSD.string, "delimiter": ","},
    "dtStart": {"p": [ICAL.dtstart], "xsd": XSD.dateTime},
    "start": {"p": [ICAL.dtstart], "xsd": XSD.dateTime},
    "dtEnd": {"p": [ICAL.dtend], "xsd": XSD.dateTime},
    "end": {"p": [ICAL.dtend], "xsd": XSD.dateTime},
    "tzid": {"p": [ICAL.tzid], "xsd": XSD.string},
    "locationRoom": {"p": [SWC.hasLocation, SWC.room], "xsd": XSD.string},
    "room": {"p": [SWC.hasLocation, SWC.room], "xsd": XSD.string},
    "locationAddress": {"p": [SWC.hasLocation, SWC.address], "xsd": XSD.string},
    "address": {"p": [SWC.hasLocation, SWC.address], "xsd": XSD.string},
    "orderInSuperEvent": {"p": [SWC.orderInSession, SWC.order_in_super_event], "xsd": XSD.integer},
    "order_in_super_event": {"p": [SWC.orderInSession, SWC.order_in_super_event], "xsd": XSD.integer},
    "category": {"p": [SWRC.category], "xsd": XSD.string},

    #object property
    "link_open_access": {"p": [SWRC.url, SWRC.link_open_access]},
    "link_publisher": {"p": [SWRC.url, SWRC.link_publisher]},
    "linkDocument": {"p": [SWRC.url, SWRC.link_document]},
    "link_document": {"p": [SWRC.url, SWRC.link_document]},
    "depiction": {"p": [FOAF.depiction]},
    "logo": {"p": [FOAF.logo]},
    "homepage": {"p": [FOAF.homepage]}
}
@staticmethod
def get_namespace(prefix):
    """Return '<NS_ROOT><prefix>/' for the known entity prefixes, NS_ROOT otherwise."""
    if prefix in (DataIswc.PREFIX_ORG, DataIswc.PREFIX_PERSON):
        return "{0}{1}/".format(DataIswc.NS_ROOT, prefix)
    return DataIswc.NS_ROOT
def expand_uri(self, uri):
    """Replace every configured prefix in *uri* with its full namespace."""
    for prefix, namespace in self.local_config["prefix_ns_map"].items():
        uri = uri.replace(prefix, namespace)
    return uri
def cache_map_name_res(self, name, res):
    """Remember the resource minted for *name* so later lookups reuse it."""
    #remove extra white space around
    name = name.strip()
    name = re.sub("\s+", " ", name)

    # Key the cache on the ASCII-safe local name, matching URI minting.
    localname = create_ascii_localname(name)
    self.map_name_res[localname] = res
def create_list_named_entity(self, namespace, name):
    """
    Mint (or reuse) URIRefs for the entity named *name* under *namespace*.

    When a DbpediaApi is cached for the namespace, the name is first
    canonicalized through it (auto/skip entries are reported and left
    untyped). New resources are labeled, typed foaf:Person/Organization
    by namespace, owl:sameAs-linked to DBpedia when known, and cached in
    self.map_name_res. Returns {resolved name: URIRef}.
    """
    real_name = None
    if name in self.map_name_info:
        if "real_name" in self.map_name_info[name]:
            real_name=self.map_name_info[name]["real_name"]

    #remove extra white space around
    name = name.strip()
    name = re.sub("\s+", " ", name)

    ret = {}

    #use canonical name
    bool_processed = False
    map_name_entry= {}
    if namespace in self.dbpedia_api:
        api = self.dbpedia_api[namespace]
        map_name_entry = api.process_names(name)
        for name_new in map_name_entry:
            entry = map_name_entry[name_new]
            if DbpediaApi.is_entry_auto(entry):
                #print entry
                print "new entry [{}]=>[{}]".format(name_new, entry["title"])
            elif DbpediaApi.is_entry_skip(entry):
                print "skip entry [{}]=>[{}]".format(name_new, entry["title"])
            else:
                #print entry
                bool_processed = True
    else:
        # No canonicalizer for this namespace: keep the raw name.
        map_name_entry[name] = None

    for name_new in map_name_entry:
        entry = map_name_entry[name_new]

        if not bool_processed:
            self.list_name_untyped.add(name_new)

        localname = create_ascii_localname(name_new)
        if localname in self.map_name_res:
            ret[name_new] = self.map_name_res[localname]
        else:
            uri = "{0}{1}".format(namespace, localname)
            res_entity = URIRef(uri)
            if real_name:
                self.create_triple_simple(res_entity, "name", real_name)
            else:
                self.create_triple_simple(res_entity, "name", name_new)
            self.map_name_res[localname] = res_entity
            if entry and 'uri' in entry and entry['uri']:
                self.graph.add((res_entity, OWL.sameAs, URIRef(entry['uri'])))
            if namespace == DataIswc.get_namespace(DataIswc.PREFIX_PERSON):
                self.graph.add((res_entity, RDF.type, FOAF.Person))
            elif namespace == DataIswc.get_namespace(DataIswc.PREFIX_ORG):
                self.graph.add((res_entity, RDF.type, FOAF.Organization))
            ret[name_new] = res_entity

    return ret
def create_role_to_event(self, uri_event, role_type, role_label, res_entity):
    """
    Link *res_entity* to an event through a reified role node.

    Mints <event>/<role-localname>, types it *role_type*, labels it, and
    wires swc:isRoleAt/heldBy/hasRole/holdsRole in both directions.
    Silently does nothing if any of the three identifiers is empty.
    """
    if len(uri_event) == 0:
        return
    if len(role_type) == 0:
        return
    if len(role_label) == 0:
        return

    uri_event = self.expand_uri(uri_event)
    res_event = URIRef(uri_event)
    res_role_type = URIRef(self.expand_uri(role_type))

    # Role URI: event URI plus an ASCII-safe version of the label.
    uri_role = "%s/%s" % (uri_event, create_ascii_localname(role_label) )
    res_role = URIRef(uri_role)
    self.graph.add((res_role, RDF.type, res_role_type))
    self.graph.add((res_role, RDFS.label, Literal(role_label)))
    self.graph.add((res_role, SWC.isRoleAt, res_event))
    self.graph.add((res_role, SWC.heldBy, res_entity))
    self.graph.add((res_event, SWC.hasRole, res_role ))
    self.graph.add((res_entity, SWC.holdsRole, res_role))
def create_triple_complex(self, res_subject, list_field, entry):
    """Emit triples for every field of *list_field* that is present in *entry*."""
    for field in (f for f in list_field if f in entry):
        self.create_triple_simple(res_subject, field, entry[field])
def create_triple_simple(self, res_subject, field, value):
    """
    Emit (subject, p, value) for every predicate mapped to *field*.

    *field* must be a key of PROP2URI. Values become typed Literals when
    the mapping carries an "xsd" entry (plain Literal for xsd:string) and
    URIRefs otherwise. Empty values are ignored.
    """
    if len(value) == 0:
        return

    for p in DataIswc.PROP2URI[field]["p"]:
        if "xsd" in DataIswc.PROP2URI[field]:
            if XSD.string == DataIswc.PROP2URI[field]["xsd"]:
                self.graph.add((res_subject, p, Literal(value)))
            else:
                self.graph.add((res_subject, p, Literal(value, datatype=DataIswc.PROP2URI[field]["xsd"])))
        else:
            self.graph.add((res_subject, p, URIRef(value)))
def process_misc(self):
    """Attach dataset-level provenance (version, creator, modified) to [ME]/complete."""
    res_me = URIRef(self.expand_uri("[ME]"))
    res_data = URIRef(self.expand_uri("[ME]/complete"))
    self.graph.add((res_me, SWC.completeGraph, res_data ))
    self.graph.add((res_data, RDF.type, DCTYPE.Dataset ))
    self.graph.add((res_data, DCTERMS.hasVersion, Literal(VERSION_INFO)))
    self.graph.add((res_data, RDFS.comment, Literal(
        "This dataset is created by Li Ding http://liding.org. To learn more about this dataset, go to https://github.com/lidingpku/open-conference-data/tree/master/data/iswc ")))
    # Timestamp of this conversion run.
    self.graph.add(
        (res_data, DCTERMS.modified, Literal(datetime.datetime.now().isoformat(), datatype=XSD.datetime)))
    self.graph.add((res_data, DCTERMS.creator, Literal("Li Ding")))
def process_organization(self):
    """Load {id}-organization.csv and emit organization resources plus their event roles."""
    filename = "{0}/data/source/{1}-organization.csv".format(
        self.global_config["home"],
        self.local_config["id"])
    with open(filename) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            # Rows shorter than the header are truncated/broken: skip.
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue

            entry = dict(zip(headers, row))
            if len(entry["name"]) == 0:
                #print "skipping empty name row %s" % entry
                continue

            for res_organization in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_ORG), entry["name"]).values():
                #object properties
                self.create_triple_complex(res_organization, ["homepage", "logo"], entry)

                #role
                self.create_role_to_event(
                    entry["role_event"],
                    entry["role_type"],
                    entry["role_label"],
                    res_organization)
def init_map_person_name(self):
    """
    Load the global person-name alias table (data/entity/person.csv).

    Populates:
      self.map_name      -- alias -> canonical name
      self.map_name_info -- canonical name -> {"other_names": [aliases]}

    Idempotent: returns immediately once self.map_name exists.
    """
    if hasattr(self, "map_name"):
        return

    # load global entity name mappings
    filename = "{0}/data/entity/person.csv".format(
        self.global_config["home"],
        self.local_config["id"])
    map_name = {} #othername -> name
    map_name_info = {} #name -> (real name, list of other name)
    with open(filename) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) != len(headers):
                #print "skipping mismatch row %s" % row
                continue

            entry = dict(zip(headers, row))
            if entry["name"]:
                name = entry["name"].strip()
                # BUG FIX: the original tested the literal list
                # ``["other_names"]`` (always truthy), which also mapped the
                # empty alias '' onto the last name read. Test the field.
                if entry["other_names"]:
                    #real_name = entry["name"]
                    #if "real_name" in entry:
                    #    real_name = entry["real_name"]
                    map_name_info[name] = {"other_names": [x.strip() for x in entry["other_names"].split(";")]}
                    for other_name in map_name_info[name]["other_names"]:
                        map_name[other_name] = name

    self.map_name = map_name
    self.map_name_info = map_name_info
def get_final_name(self, name):
    """Map *name* (stripped) through the alias table to its canonical form."""
    self.init_map_person_name()
    stripped = name.strip()
    return self.map_name.get(stripped, stripped)
def process_person(self):
    """
    Load {id}-person.csv and emit person resources.

    Per row: canonicalize the name, cache aliases, copy the homepage,
    reify the event role, link organizations both ways, record alt
    names, and publish only a SHA1 of the mailto: URI (foaf:mbox_sha1sum)
    instead of the raw email address.
    """
    #load person
    filename = "{0}/data/source/{1}-person.csv".format(
        self.global_config["home"],
        self.local_config["id"])
    with open(filename) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) != len(headers):
                #print "skipping mismatch row %s" % row
                continue

            entry = dict(zip(headers, row))
            if len(entry["name"]) == 0:
                #print "skipping empty name row %s" % entry
                continue

            name = entry["name"].strip()
            name = self.get_final_name(name)
            for res_person in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), name).values():
                #map other names
                for other_name in entry["other_names"].split(","):
                    self.cache_map_name_res(other_name, res_person)
                if name in self.map_name_info:
                    for other_name in self.map_name_info[name]["other_names"]:
                        self.cache_map_name_res(other_name, res_person)

                #object properties
                self.create_triple_complex(res_person, ["homepage"], entry)

                #role
                self.create_role_to_event(
                    entry["role_event"],
                    entry["role_type"],
                    entry["role_label"],
                    res_person)

                #organization (';'-separated list)
                if "organization" in entry:
                    for org in entry["organization"].split(";"):
                        if len(org) == 0:
                            continue
                        for res_organization in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_ORG), org).values():
                            self.graph.add((res_organization, FOAF.member, res_person))
                            #inverse property
                            self.graph.add((res_person, SWRC.affiliation, res_organization))

                #alt-name
                self.create_triple_complex(res_person, ["other_names"], entry)

                #email: hash only, raw address deliberately not published
                if len(entry["email"]) > 0:
                    if not entry["email"].startswith("mailto:"):
                        mbox = "mailto:%s" % entry["email"]
                    else:
                        mbox = entry["email"]
                    mbox_sha1sum = hashlib.sha1(mbox).hexdigest()
                    #self.graph.add( (res_person, FOAF.mbox, URIRef(mbox)) )
                    self.graph.add((res_person, FOAF.mbox_sha1sum, Literal(mbox_sha1sum)))
def process_event(self):
    """
    Load {id}-event.csv and emit the event hierarchy.

    Per row: type the event, link it under its super-event (defaulting to
    [ME]), mint a sequential URI when none is given, copy simple
    properties, connect TalkEvents to previously-minted papers (hard exit
    when the paper is missing), and reify Chair/Presenter roles.
    """
    filename = "{0}/data/source/{1}-event.csv".format(
        self.global_config["home"],
        self.local_config["id"])
    counter_event = MyCounter()
    with open(filename) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) != len(headers):
                #print "skipping mismatch row %s" % row
                continue

            entry = dict(zip(headers, row))
            if len(entry["label"].strip()) == 0:
                #print "skipping empty label row %s" % entry
                continue
            if len(entry["event_type"].strip()) == 0:
                #print "skipping empty event_type row %s" % entry
                continue
            if entry["event_uri"].startswith("#"):
                #print "skipping empty commented row %s" % entry
                continue

            #set default super event
            if len(entry["super_event_uri"]) == 0:
                entry["super_event_uri"] = "[ME]"

            expand_entry(entry)

            uri_super_event = self.expand_uri(entry["super_event_uri"])
            res_super_event = URIRef(uri_super_event)
            # Events without an explicit URI get <super>/event-NN, numbered
            # per super-event.
            if len(entry["event_uri"]) == 0:
                counter_event.inc(uri_super_event)
                entry["event_uri"] = "%s/event-%02d" % (
                    uri_super_event,
                    counter_event.data[uri_super_event])

            uri_event = self.expand_uri(entry["event_uri"])
            res_event = URIRef(uri_event)

            #event type
            self.graph.add((res_event, RDF.type, SWC[entry["event_type"]]))

            #super event
            self.graph.add((res_event, SWC.isSubEventOf, res_super_event))
            self.graph.add((res_super_event, SWC.isSuperEventOf, res_event))

            #simple properties
            self.create_triple_complex(
                res_event,
                ["label", "acronym", "abstract",
                 "order_in_super_event",
                 "start", "end", "tzid",
                 "room", "address",
                 "homepage", "link_document", "logo"],
                entry)

            #linking paper event (paper resource must already be cached)
            if "TalkEvent" == entry["event_type"]:
                if entry["label"] in self.map_name_res:
                    res_paper = self.map_name_res[entry["label"]]
                    self.graph.add(( res_event, SWC.hasRelatedDocument, res_paper))
                    self.graph.add(( res_paper, SWC.relatedToEvent, res_event))
                else:
                    # Hard failure: a talk must reference a known paper.
                    print "missing paper link [{}]".format(entry["label"])
                    #print json.dumps(self.map_name_res, indent=4, sort_keys=True)
                    sys.exit(0)

            #role -chair
            for role in ["Chair", "Presenter"]:
                role_lower = role.lower()
                if len(entry[role_lower + "_person"]) > 0:
                    person_data = DataIswc.parse_person_list(entry[role_lower + "_person"])
                    for name in person_data["list"]:
                        if len(name) == 0:
                            continue
                        name = self.get_final_name(name)
                        for res_person in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON),name).values():
                            # Prefix the event kind (Workshop/Tutorial) onto
                            # the role label for disambiguation.
                            role_label_x = entry[role_lower + "_label"]
                            event_type_x = entry["event_type"].split("#")[-1].replace("Event", "")
                            if event_type_x in ["Workshop", "Tutorial"]:
                                role_label_x = u"{} {}".format(event_type_x, role_label_x)
                            assert (len(role.strip())>0)
                            self.create_role_to_event(
                                uri_event,
                                "swc:" + role,
                                role_label_x,
                                res_person)
def create_container(self, elements, contType, uri_subject=None):
'''http://dev.w3.org/2004/PythonLib-IH/NewRDFLib/rdflib/Graph.py'''
if None == uri_subject:
container = BNode()
else:
container = URIRef(uri_subject)
self.graph.add((container, RDF.type, contType))
for i in range(0, len(elements)):
uri_pred = "%s_%d" % (RDF, i + 1)
pred = URIRef(uri_pred)
self.graph.add((container, pred, elements[i]))
return container
@staticmethod
def parse_person_list(text):
author_x = text
author_x = re.sub("[,\s]+and[,\s]+", ",", author_x)
author_x = re.sub("\s+", " ", author_x)
list_author_x = [x.strip() for x in author_x.split(",")]
if "" in list_author_x:
#print "....."
list_author_x.remove("")
if len(list_author_x) > 1:
author_x_and = "{} and {}".format(",".join(list_author_x[0:-1]), list_author_x[-1])
else:
author_x_and = list_author_x[0]
ret = {}
ret["text"] = author_x_and
ret["list"] = list_author_x
return ret
    def process_paper(self):
        """Load the paper CSV for this year and emit SWRC paper triples.

        Papers get sequential URIs (<proceedings>/paper-NN), author lists as
        rdf:Seq containers, and are cached in self.map_name_res by title so
        process_event can link talks to them.
        """
        filename = "{0}/data/source/iswc-all-papers.csv".format(
            self.global_config["home"])
        # 2013/2014 ship per-year paper files instead of the combined one.
        if self.local_config["id"] in ["iswc-2013","iswc-2014"]:
            filename = "{}/data/source/{}-paper.csv".format(
                self.global_config["home"],
                self.local_config["id"])
        # Numbers papers sequentially within each proceedings volume.
        counter_paper = MyCounter()
        with open(filename) as f:
            csvreader = UnicodeReader(f)
            headers = csvreader.next()
            for row in csvreader:
                if len(row) != len(headers):
                    #print "skipping mismatch row %s" % row
                    continue
                entry = dict(zip(headers, row))
                if entry["year"] != self.local_config["year"]:
                    #skip mismatched year
                    continue
                if len(entry["title"]) == 0:
                    print "skipping empty title row %s" % entry
                    continue
                if len(entry["proceedings_uri"]) == 0:
                    print "skipping empty proceedings row %s" % entry
                    continue
                expand_entry(entry)
                counter_paper.inc(entry["proceedings_uri"])
                id_paper = counter_paper.data[entry["proceedings_uri"]]
                uri_paper = "%s/paper-%02d" % (entry["proceedings_uri"], id_paper)
                uri_paper_author_list = "%s/paper-%02d/author_list" % (entry["proceedings_uri"], id_paper)
                #print json.dumps(entry, indent=4)
                #print uri_paper
                res_proceedings = URIRef(entry["proceedings_uri"])
                res_paper = URIRef(uri_paper)
                self.graph.add((res_paper, RDF.type, SWRC.InProceedings ))
                #part-of proceedings (both directions)
                self.graph.add((res_paper, SWC.isPartOf, res_proceedings))
                self.graph.add((res_proceedings, SWC.hasPart, res_paper))
                #author: normalized "A,B and C" text plus per-person resources
                author_data = DataIswc.parse_person_list(entry["author"])
                # if author_x_and != entry["author"]:
                # print "--------------"
                # print entry["author"]
                # print author_x_and
                # author_x_and_y = re.sub("\s+"," ",author_x_and)
                # if author_x_and != author_x_and_y:
                # print "????"
                # print author_x_and
                # print author_x_and_y
                self.graph.add((res_paper, SWRC.listAuthor, Literal(author_data["text"])))
                list_res_author = []
                for author in author_data["list"]:
                    author = self.get_final_name(author)
                    for res_author in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), author).values():
                        self.graph.add((res_author, RDF.type, FOAF.Person))
                        list_res_author.append(res_author)
                        self.graph.add((res_paper, SWRC.author, res_author))
                        self.graph.add((res_paper, FOAF.maker, res_author))
                        self.graph.add((res_author, FOAF.made, res_paper))
                # Ordered author list as an rdf:Seq container.
                res_paper_author_list = self.create_container(list_res_author, RDF.Seq, uri_paper_author_list)
                self.graph.add((res_paper, BIBO.authorList, res_paper_author_list))
                #simple properties
                self.create_triple_complex(
                    res_paper,
                    ["abstract", "keywords", "year", "pages", "title", "category",
                     "link_open_access", "link_publisher"],
                    entry)
                #cache title -> resource for process_event's talk linking
                self.map_name_res[entry["title"]] = res_paper
    def process_proceedings(self):
        """Load the combined proceedings CSV and emit SWRC Proceedings triples.

        Each matching-year row produces a swrc:Proceedings resource linked to
        its event, with editors (individually and as an rdf:Seq list) and
        simple bibliographic properties.
        """
        filename = "{0}/data/source/iswc-all-proceedings.csv".format(
            self.global_config["home"])
        # NOTE(review): counter_paper appears unused in this method.
        counter_paper = MyCounter()
        with open(filename) as f:
            csvreader = UnicodeReader(f)
            headers = csvreader.next()
            for row in csvreader:
                if len(row) != len(headers):
                    print "skipping mismatch row %s" % row
                    continue
                entry = dict(zip(headers, row))
                if entry["year"] != self.local_config["year"]:
                    #skip mismatched year
                    continue
                if len(entry["title"]) == 0:
                    print "skipping empty title row %s" % entry
                    continue
                if len(entry["proceedings_uri"]) == 0:
                    print "skipping empty proceedings_uri row %s" % entry
                    continue
                expand_entry(entry)
                uri_proceedings = self.expand_uri(entry["proceedings_uri"])
                uri_proceedings_editor_list = "%s/editor_list" % (uri_proceedings)
                uri_event = self.expand_uri(entry["event_uri"])
                #print json.dumps(entry, indent=4)
                #print uri_proceedings
                res_proceedings = URIRef(uri_proceedings)
                res_event = URIRef(uri_event)
                self.graph.add((res_proceedings, RDF.type, SWRC.Proceedings ))
                #relation to event (both directions)
                self.graph.add((res_proceedings, SWC.relatedToEvent, res_event))
                self.graph.add((res_event, SWRC.hasRelatedDocument, res_proceedings))
                #editor: raw comma-separated list plus per-person resources
                if len(entry["editor"]) > 0:
                    self.graph.add((res_proceedings, SWRC.listEditor, Literal(entry["editor"])))
                    list_res_editor = []
                    for editor in entry["editor"].split(","):
                        editor = self.get_final_name(editor)
                        for res_editor in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), editor).values():
                            list_res_editor.append(res_editor)
                            self.graph.add((res_proceedings, SWRC.editor, res_editor))
                            self.graph.add((res_proceedings, FOAF.maker, res_editor))
                            self.graph.add((res_editor, FOAF.made, res_proceedings))
                    # Ordered editor list as an rdf:Seq container.
                    res_proceedings_editor_list = self.create_container(list_res_editor, RDF.Seq,
                                                                        uri_proceedings_editor_list)
                    self.graph.add((res_proceedings, SWC.editorList, res_proceedings_editor_list))
                #simple properties
                self.create_triple_complex(
                    res_proceedings,
                    ["title", "subtitle", "abstract", "keywords", "year", "pages", "publisher", "series", "volume",
                     "link_open_access", "link_publisher", "depiction"],
                    entry)
def main():
    """Convert ISWC CSV source data (2001-2014) to RDF.

    Loads the global config and the DBpedia lookup cache, then builds the
    per-year datasets.  The cache is always flushed to disk, even when a
    year fails part-way through.
    """
    # load config file
    global_config = mu.mutil.config_load(file_home=__file__)
    # print(x) with a single argument behaves identically under Python 2.
    print(global_config)
    dir_data_entity = os.path.join(global_config["home"], "data/entity/")
    dbpedia_api = DataIswc.dbpedia_api_load(dir_data_entity)
    try:
        for year in range(2001, 2015):
            local_config = {
                "year": "{}".format(year),
                "id-swsa": "ISWC{}".format(year),
                "id-dogfood": "iswc-{}".format(year),
                "id": "iswc-{}".format(year),
                "prefix_ns_map": {
                    "[ISWC]": "{}conference/iswc".format(DataIswc.NS_ROOT),
                    "[WORKSHOP]": "{}workshop".format(DataIswc.NS_ROOT),
                    "[ME]": "{}conference/iswc/{}".format(DataIswc.NS_ROOT, year),
                    "swc:": "http://data.semanticweb.org/ns/swc/ontology#"
                }
            }
            print("processing {}".format(local_config["id"]))
            # 2007 was a joint ISWC/ASWC conference with its own identifiers.
            if year == 2007:
                local_config["id-dogfood"] = "iswc-aswc-2007"
                local_config["prefix_ns_map"]["[ME]"] = "{}conference/iswc-aswc/{}".format(
                    DataIswc.NS_ROOT, year)
            data = DataIswc(local_config, global_config, dbpedia_api)
            data.run_paper_x()
            # 2006-2011 only get the paper pass; other years get the full run.
            # ("year not in" replaces the old "not year in" form.)
            if year not in range(2006, 2012):
                data = DataIswc(local_config, global_config, dbpedia_api)
                data.run()
        DataIswc.dbpedia_api_write(dbpedia_api)
    except BaseException:
        # Explicit BaseException instead of a bare "except:"; still persists
        # the DBpedia cache before re-raising (including on Ctrl-C).
        DataIswc.dbpedia_api_write(dbpedia_api)
        import traceback
        traceback.print_exc()
        raise
    print("All done")
# Script entry point: run the full CSV-to-RDF conversion when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "226957dfec6ff9c39f50ddfa6c02efe1",
"timestamp": "",
"source": "github",
"line_count": 831,
"max_line_length": 183,
"avg_line_length": 39.63297232250301,
"alnum_prop": 0.510308182784272,
"repo_name": "mr-justin/open-conference-data",
"id": "2c84effc4f129a231372ec51c44b92826911041f",
"size": "32959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iswc-metadata/src/task_csv2rdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "58"
},
{
"name": "HTML",
"bytes": "709404"
},
{
"name": "Python",
"bytes": "319429"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``jira_issue_text`` text column to ``status_jiraissue``."""
    dependencies = [
        ('app', '0003_auto_20160627_1833'),
    ]
    operations = [
        migrations.AddField(
            model_name='status_jiraissue',
            name='jira_issue_text',
            # NOTE(review): TextField(default=None) without null=True writes
            # NULL into a NOT NULL column for existing rows -- confirm intent.
            field=models.TextField(default=None),
        ),
    ]
| {
"content_hash": "1720c9451c5f83c7acbd144e29c61a8d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.5974358974358974,
"repo_name": "sunnytambi/QuickScrum",
"id": "f0f5fad904f79e53a5df88e7a1690e4b17a74fdd",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QuickScrum/app/migrations/0004_status_jiraissue_jira_issue_text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5598"
},
{
"name": "HTML",
"bytes": "32727"
},
{
"name": "JavaScript",
"bytes": "16910"
},
{
"name": "Python",
"bytes": "265734"
}
],
"symlink_target": ""
} |
"""Handles all requests relating to instances (guest vms)."""
import datetime
import eventlet
import re
import time
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova import quota
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
from nova.scheduler import api as scheduler_api
from nova.db import base
# Module-level logger for the compute API.
LOG = logging.getLogger('nova.compute.api')
# Global flag registry; vncproxy_topic is declared in nova.vnc.
FLAGS = flags.FLAGS
flags.DECLARE('vncproxy_topic', 'nova.vnc')
# Number of one-second polls _find_host performs before giving up.
flags.DEFINE_integer('find_host_timeout', 30,
                     'Timeout after NN seconds when looking for a host.')
def generate_default_hostname(instance_id):
    """Default hostname factory: the instance id rendered as a string."""
    hostname = str(instance_id)
    return hostname
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None,
volume_api=None, hostname_factory=generate_default_hostname,
**kwargs):
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
self.image_service = image_service
if not network_api:
network_api = network.API()
self.network_api = network_api
if not volume_api:
volume_api = volume.API()
self.volume_api = volume_api
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
    def get_network_topic(self, context, instance_id):
        """Get the network topic for an instance.

        Asks the compute worker hosting the instance (via RPC) which
        network topic it uses.  Raises NotFound if the instance does not
        exist and Error if it has not been scheduled onto a host yet.
        """
        try:
            instance = self.get(context, instance_id)
        except exception.NotFound:
            LOG.warning(_("Instance %d was not found in get_network_topic"),
                        instance_id)
            raise
        host = instance['host']
        if not host:
            raise exception.Error(_("Instance %d has no host") % instance_id)
        topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
        # 'fake' is a placeholder arg; the remote method takes no real args.
        return rpc.call(context,
                        topic,
                        {"method": "get_network_topic", "args": {'fake': 1}})
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
limit = quota.allowed_injected_files(context)
if len(injected_files) > limit:
raise quota.QuotaError(code="OnsetFileLimitExceeded")
path_limit = quota.allowed_injected_file_path_bytes(context)
content_limit = quota.allowed_injected_file_content_bytes(context)
for path, content in injected_files:
if len(path) > path_limit:
raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
if len(content) > content_limit:
raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
def _check_metadata_properties_quota(self, context, metadata={}):
"""Enforce quota limits on metadata properties."""
num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
pid = context.project_id
msg = _("Quota exceeeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if len(k) > 255 or len(v) > 255:
pid = context.project_id
msg = _("Quota exceeeded for %(pid)s, metadata property "
"key or value too long") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata={},
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid, creates the DB
        records, and casts one run_instance per instance to the scheduler.

        :returns: list of plain dicts for the created instance records

        NOTE(review): ``metadata={}`` is a mutable default argument; it
        appears to be read-only here but confirm the db layer never
        mutates it.
        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        # Quota checks: instance count, metadata size, injected files.
        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        # Fall back to the kernel/ramdisk recorded on the image.
        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                      (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        # Normalize security_group to a list of names, then resolve to ids.
        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        # Resolve key_name to its public key unless key_data was given.
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        # Fields shared by every instance in this request.
        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())

            # NOTE(sandy): For now we're just going to pass in the
            # instance_type record to the scheduler. In a later phase
            # we'll be ripping this whole for-loop out and deferring the
            # creation of the Instance record. At that point all this will
            # change.
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "instance_type": instance_type,
                               "availability_zone": availability_zone,
                               "injected_files": injected_files}})

            # Let group members' hosts refresh their view of the new member.
            for group_id in security_groups:
                self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
def has_finished_migration(self, context, instance_id):
"""Returns true if an instance has a finished migration."""
try:
db.migration_get_by_instance_and_status(context, instance_id,
'finished')
return True
except exception.NotFound:
return False
def ensure_default_security_group(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
try:
db.security_group_get_by_name(context, context.project_id,
'default')
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
db.security_group_create(context, values)
def trigger_security_group_rules_refresh(self, context, security_group_id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(context, security_group_id)
hosts = set()
for instance in security_group['instances']:
if instance['host'] is not None:
hosts.add(instance['host'])
for host in hosts:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group.id}})
    def trigger_security_group_members_refresh(self, context, group_id):
        """Called when a security group gains a new or loses a member.

        Sends an update request to each compute node for whom this is
        relevant.

        :param group_id: id of the security group whose membership changed
        """
        # First, we get the security group rules that reference this group as
        # the grantee..
        security_group_rules = \
            self.db.security_group_rule_get_by_security_group_grantee(
                                                                context,
                                                                group_id)

        # ..then we distill the security groups to which they belong..
        security_groups = set()
        for rule in security_group_rules:
            security_group = self.db.security_group_get(
                                                    context,
                                                    rule['parent_group_id'])
            security_groups.add(security_group)

        # ..then we find the instances that are members of these groups..
        instances = set()
        for security_group in security_groups:
            for instance in security_group['instances']:
                instances.add(instance)

        # ...then we find the hosts where they live...
        hosts = set()
        for instance in instances:
            if instance['host']:
                hosts.add(instance['host'])

        # ...and finally we tell these nodes to refresh their view of this
        # particular security group.
        for host in hosts:
            rpc.cast(context,
                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
                     {"method": "refresh_security_group_members",
                      "args": {"security_group_id": group_id}})
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance_id: ID of the instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: None
"""
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
    @scheduler_api.reroute_compute("delete")
    def delete(self, context, instance_id):
        """Terminate an instance.

        No-op (with a warning) when the instance is already terminating or
        mid-migration.  If the instance was never scheduled onto a host the
        DB record is destroyed directly instead of casting to compute.
        """
        LOG.debug(_("Going to try to terminate %s"), instance_id)
        try:
            instance = self.get(context, instance_id)
        except exception.NotFound:
            LOG.warning(_("Instance %s was not found during terminate"),
                        instance_id)
            raise

        # Idempotency/安全 guards: skip if already terminating or migrating.
        if instance['state_description'] == 'terminating':
            LOG.warning(_("Instance %s is already being terminated"),
                        instance_id)
            return

        if instance['state_description'] == 'migrating':
            LOG.warning(_("Instance %s is being migrated"), instance_id)
            return

        self.update(context,
                    instance['id'],
                    state_description='terminating',
                    state=0,
                    terminated_at=datetime.datetime.utcnow())

        host = instance['host']
        if host:
            self._cast_compute_message('terminate_instance', context,
                    instance_id, host)
        else:
            # Never scheduled: nothing running anywhere, just drop the record.
            self.db.instance_destroy(context, instance_id)
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
    @scheduler_api.reroute_compute("get")
    def routing_get(self, context, instance_id):
        """A version of get with special routing characteristics.

        Use this method instead of get() if this is the only operation you
        intend to to. It will route to novaclient.get if the instance is not
        found.
        """
        # The decorator performs the rerouting; locally this is a plain get.
        return self.get(context, instance_id)
    def get_all(self, context, project_id=None, reservation_id=None,
                fixed_ip=None):
        """Get all instances filtered by one of the given parameters.

        Exactly one filter is applied, in priority order: reservation_id,
        then fixed_ip, then project (non-admins are always scoped to their
        own user/project).  If there is no filter and the context is an
        admin, it will retreive all instances in the system.
        """
        if reservation_id is not None:
            return self.db.instance_get_all_by_reservation(
                context, reservation_id)

        if fixed_ip is not None:
            return self.db.fixed_ip_get_instance(context, fixed_ip)

        if project_id or not context.is_admin:
            # Contexts without a project fall back to per-user scoping.
            if not context.project:
                return self.db.instance_get_all_by_user(
                    context, context.user_id)

            if project_id is None:
                project_id = context.project_id

            return self.db.instance_get_all_by_project(
                context, project_id)

        return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
"""Generic handler for RPC casts to compute.
:param params: Optional dictionary of arguments to be passed to the
compute worker
:returns: None
"""
if not params:
params = {}
if not host:
instance = self.get(context, instance_id)
host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
rpc.cast(context, queue, kwargs)
def _call_compute_message(self, method, context, instance_id, host=None,
params=None):
"""Generic handler for RPC calls to compute.
:param params: Optional dictionary of arguments to be passed to the
compute worker
:returns: Result returned by compute worker
"""
if not params:
params = {}
if not host:
instance = self.get(context, instance_id)
host = instance["host"]
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
    def _cast_scheduler_message(self, context, args):
        """Generic handler for RPC calls to the scheduler.

        :param args: complete message dict ({"method": ..., "args": ...})
        """
        rpc.cast(context, FLAGS.scheduler_topic, args)
def _find_host(self, context, instance_id):
"""Find the host associated with an instance."""
for attempts in xrange(FLAGS.find_host_timeout):
instance = self.get(context, instance_id)
host = instance["host"]
if host:
return host
time.sleep(1)
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
    def _set_admin_password(self, context, instance_id, password):
        """Set the root/admin password for the given instance.

        Blocks (polling) until the instance has a host, then casts the
        password change to that host's compute worker.
        """
        host = self._find_host(context, instance_id)

        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "set_admin_password",
                  "args": {"instance_id": instance_id, "new_pass": password}})
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
:returns: A dict containing image metadata
"""
properties = {'instance_id': str(instance_id),
'user_id': str(context.user_id)}
sent_meta = {'name': name, 'is_public': False,
'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
params = {'image_id': recv_meta['id']}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return recv_meta
    def reboot(self, context, instance_id):
        """Reboot the given instance (async cast to its compute host)."""
        self._cast_compute_message('reboot_instance', context, instance_id)
def rebuild(self, context, instance_id, image_id, metadata=None,
files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = db.api.instance_get(context, instance_id)
if instance["state"] == power_state.BUILDING:
msg = _("Instance already building")
raise exception.BuildInProgress(msg)
metadata = metadata or {}
self._check_metadata_properties_quota(context, metadata)
files_to_inject = files_to_inject or []
self._check_injected_file_quota(context, files_to_inject)
self.db.instance_update(context, instance_id, {"metadata": metadata})
rebuild_params = {
"image_id": image_id,
"injected_files": files_to_inject,
}
self._cast_compute_message('rebuild_instance',
context,
instance_id,
params=rebuild_params)
    def revert_resize(self, context, instance_id):
        """Reverts a resize, deleting the 'new' instance in the process.

        Looks up the 'finished' migration for the instance, casts the revert
        to the *destination* compute host, and marks the migration reverted.
        """
        context = context.elevated()
        migration_ref = self.db.migration_get_by_instance_and_status(context,
                instance_id, 'finished')
        # NOTE(review): the db lookup may raise NotFound rather than return
        # None, which would make this guard dead code -- confirm.
        if not migration_ref:
            raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
                                                      status='finished')

        params = {'migration_id': migration_ref['id']}
        self._cast_compute_message('revert_resize', context, instance_id,
                migration_ref['dest_compute'], params=params)

        self.db.migration_update(context, migration_ref['id'],
                {'status': 'reverted'})
def confirm_resize(self, context, instance_id):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
migration_ref = self.db.migration_get_by_instance_and_status(context,
instance_id, 'finished')
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
instance_ref = self.db.instance_get(context, instance_id)
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context, instance_id,
migration_ref['source_compute'], params=params)
self.db.migration_update(context, migration_ref['id'],
{'status': 'confirmed'})
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
def resize(self, context, instance_id, flavor_id):
"""Resize a running instance."""
instance = self.db.instance_get(context, instance_id)
current_instance_type = instance['instance_type']
new_instance_type = self.db.instance_type_get_by_flavor_id(
context, flavor_id)
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s") % locals())
if not new_instance_type:
raise exception.ApiError(_("Requested flavor %(flavor_id)d "
"does not exist") % locals())
current_memory_mb = current_instance_type['memory_mb']
new_memory_mb = new_instance_type['memory_mb']
if current_memory_mb > new_memory_mb:
raise exception.ApiError(_("Invalid flavor: cannot downsize"
"instances"))
if current_memory_mb == new_memory_mb:
raise exception.ApiError(_("Invalid flavor: cannot use"
"the same flavor. "))
self._cast_scheduler_message(context,
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
"flavor_id": flavor_id}})
    @scheduler_api.reroute_compute("pause")
    def pause(self, context, instance_id):
        """Pause the given instance (async cast to its compute host)."""
        self._cast_compute_message('pause_instance', context, instance_id)
    @scheduler_api.reroute_compute("unpause")
    def unpause(self, context, instance_id):
        """Unpause the given instance (async cast to its compute host)."""
        self._cast_compute_message('unpause_instance', context, instance_id)
    @scheduler_api.reroute_compute("diagnostics")
    def get_diagnostics(self, context, instance_id):
        """Retrieve diagnostics for the given instance (blocking RPC call)."""
        return self._call_compute_message("get_diagnostics",
                                          context,
                                          instance_id)
    def get_actions(self, context, instance_id):
        """Retrieve the recorded actions for the given instance from the db."""
        return self.db.instance_get_actions(context, instance_id)
    @scheduler_api.reroute_compute("suspend")
    def suspend(self, context, instance_id):
        """Suspend the given instance (async cast to its compute host)."""
        self._cast_compute_message('suspend_instance', context, instance_id)
    @scheduler_api.reroute_compute("resume")
    def resume(self, context, instance_id):
        """Resume the given instance (async cast to its compute host)."""
        self._cast_compute_message('resume_instance', context, instance_id)
    @scheduler_api.reroute_compute("rescue")
    def rescue(self, context, instance_id):
        """Rescue the given instance (async cast to its compute host)."""
        self._cast_compute_message('rescue_instance', context, instance_id)
    @scheduler_api.reroute_compute("unrescue")
    def unrescue(self, context, instance_id):
        """Unrescue the given instance (async cast to its compute host)."""
        self._cast_compute_message('unrescue_instance', context, instance_id)
    def set_admin_password(self, context, instance_id, password=None):
        """Set the root/admin password for the given instance.

        Runs asynchronously in a green thread because _set_admin_password
        may block polling for the instance's host.
        """
        eventlet.spawn_n(self._set_admin_password, context, instance_id,
                         password)
    def inject_file(self, context, instance_id):
        """Write a file to the given instance.

        NOTE(review): no path/contents are passed to the compute worker
        here, unlike other params-carrying casts -- confirm the remote
        'inject_file' method really takes only instance_id.
        """
        self._cast_compute_message('inject_file', context, instance_id)
    def get_ajax_console(self, context, instance_id):
        """Get a url to an AJAX Console.

        Fetches token/host/port from the compute worker, authorizes the
        token with the AJAX console proxy, and returns the proxy URL.
        """
        output = self._call_compute_message('get_ajax_console',
                                            context,
                                            instance_id)
        rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
                 {'method': 'authorize_ajax_console',
                  'args': {'token': output['token'], 'host': output['host'],
                  'port': output['port']}})
        return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
                                         output['token'])}
    def get_vnc_console(self, context, instance_id):
        """Get a url to a VNC Console.

        Fetches token/host/port from the compute worker, authorizes the
        token with the VNC proxy (blocking call), and returns the noVNC URL.
        """
        # NOTE(review): the result of this lookup is unused; presumably it
        # exists to raise NotFound for a missing instance -- confirm.
        instance = self.get(context, instance_id)
        output = self._call_compute_message('get_vnc_console',
                                            context,
                                            instance_id)
        rpc.call(context, '%s' % FLAGS.vncproxy_topic,
                 {'method': 'authorize_vnc_console',
                  'args': {'token': output['token'],
                           'host': output['host'],
                           'port': output['port']}})

        # hostignore and portignore are compatability params for noVNC
        return {'url': '%s/vnc_auto.html?token=%s&host=%s&port=%s' % (
                       FLAGS.vncproxy_url,
                       output['token'],
                       'hostignore',
                       'portignore')}
    def get_console_output(self, context, instance_id):
        """Get console output for an an instance (blocking RPC call)."""
        return self._call_compute_message('get_console_output',
                                          context,
                                          instance_id)
    def lock(self, context, instance_id):
        """Lock the given instance (async cast to its compute host)."""
        self._cast_compute_message('lock_instance', context, instance_id)
    def unlock(self, context, instance_id):
        """Unlock the given instance (async cast to its compute host)."""
        self._cast_compute_message('unlock_instance', context, instance_id)
def get_lock(self, context, instance_id):
"""Return the boolean state of given instance's lock."""
instance = self.get(context, instance_id)
return instance['locked']
    def reset_network(self, context, instance_id):
        """Reset networking on the instance.

        Fire-and-forget cast to the compute host.
        """
        self._cast_compute_message('reset_network', context, instance_id)
    def inject_network_info(self, context, instance_id):
        """Inject network info for the instance.

        Fire-and-forget cast to the compute host.
        """
        self._cast_compute_message('inject_network_info', context, instance_id)
    def attach_volume(self, context, instance_id, volume_id, device):
        """Attach an existing volume to an existing instance.

        Validates the guest device name, verifies the volume is in an
        attachable state, then casts the attach request to the compute
        host that owns the instance (returns before the attach completes).

        :raises: exception.ApiError if the device name is malformed.
        """
        # Accepts names of the form /dev/?d?+ e.g. /dev/vdb, /dev/sdc.
        if not re.match("^/dev/[a-z]d[a-z]+$", device):
            raise exception.ApiError(_("Invalid device specified: %s. "
                                     "Example device: /dev/vdb") % device)
        # Raises if the volume cannot currently be attached.
        self.volume_api.check_attach(context, volume_id=volume_id)
        instance = self.get(context, instance_id)
        host = instance['host']
        # Fire-and-forget cast to the compute service on the owning host.
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "attach_volume",
                  "args": {"volume_id": volume_id,
                           "instance_id": instance_id,
                           "mountpoint": device}})
    def detach_volume(self, context, volume_id):
        """Detach a volume from an instance.

        Looks up which instance the volume is attached to, verifies the
        volume is detachable, and casts the detach request to that
        instance's compute host.  Returns the instance record.

        :raises: exception.ApiError if the volume is not attached.
        """
        instance = self.db.volume_get_instance(context.elevated(), volume_id)
        if not instance:
            raise exception.ApiError(_("Volume isn't attached to anything!"))
        # Raises if the volume cannot currently be detached.
        self.volume_api.check_detach(context, volume_id=volume_id)
        host = instance['host']
        # Fire-and-forget cast; does not wait for the detach to finish.
        rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "detach_volume",
                  "args": {"instance_id": instance['id'],
                           "volume_id": volume_id}})
        return instance
    def associate_floating_ip(self, context, instance_id, address):
        """Associate a floating ip with an instance.

        Resolves the instance's fixed ip and delegates the association
        to the network API.
        """
        instance = self.get(context, instance_id)
        self.network_api.associate_floating_ip(context,
                                               floating_ip=address,
                                               fixed_ip=instance['fixed_ip'])
def get_instance_metadata(self, context, instance_id):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance_id)
return dict(rv.iteritems())
    def delete_instance_metadata(self, context, instance_id, key):
        """Delete the given metadata item from an instance.

        Delegates directly to the database layer.
        """
        self.db.instance_metadata_delete(context, instance_id, key)
    def update_or_create_instance_metadata(self, context, instance_id,
                                           metadata):
        """Updates or creates instance metadata.

        Merges the requested metadata over the existing items and checks
        the combined result against the metadata quota BEFORE writing,
        so a quota violation leaves the stored metadata untouched.
        """
        combined_metadata = self.get_instance_metadata(context, instance_id)
        combined_metadata.update(metadata)
        self._check_metadata_properties_quota(context, combined_metadata)
        # Only the new/changed items are passed to the DB layer.
        self.db.instance_metadata_update_or_create(context, instance_id,
                                                   metadata)
| {
"content_hash": "3adf13d39a59e1ffb4a33fa1c0dbc182",
"timestamp": "",
"source": "github",
"line_count": 767,
"max_line_length": 79,
"avg_line_length": 42.238591916558015,
"alnum_prop": 0.5639719727135228,
"repo_name": "superstack/nova",
"id": "a12b7dee5fa875d4f0e710c285286ffbc19d1fe2",
"size": "33174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/compute/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2491049"
},
{
"name": "Shell",
"bytes": "31698"
}
],
"symlink_target": ""
} |
import logging
from django.contrib.gis.db import models
from census_places.enums import STATES
logger = logging.getLogger('census_places.models')
class PlaceBoundary(models.Model):
    """Census "place" (city/town) boundary polygon.

    Provides helpers to find the place containing a point, or the
    nearest place to a point via an expanding-buffer search.
    """
    geo_id = models.CharField(
        primary_key=True,
        max_length=60
    )
    state = models.CharField(
        max_length=2,
        db_index=True,
        choices=STATES
    )
    place = models.CharField(max_length=5)
    name = models.CharField(max_length=90)
    lsad = models.CharField(
        null=True,
        blank=True,
        max_length=7
    )
    censusarea = models.FloatField(
        verbose_name='census area')
    geog = models.MultiPolygonField(
        geography=True,
        spatial_index=False  # True breaks MySQL with InnoDB.
    )
    objects = models.GeoManager()

    @classmethod
    def get_containing(cls, point):
        """Return the place boundary whose geography covers ``point``.

        Raises ``cls.DoesNotExist`` / ``MultipleObjectsReturned`` per the
        usual ``QuerySet.get`` semantics.
        """
        boundary = cls.objects.get(
            geog__covers=point
        )
        logger.debug("Found geometry %s covering %s" % (
                boundary,
                point,
            )
        )
        return boundary

    @classmethod
    def get_nearest_to(cls, point, stack_depth_maximum=5, stack_depth=1, buffer_size=0.2):
        """Return the place boundary nearest to ``point``.

        Searches for boundaries whose bounding box overlaps a buffer
        around the point, ordered by distance.  If none are found the
        buffer is doubled and the search retried recursively, up to
        ``stack_depth_maximum`` attempts; after that ``cls.DoesNotExist``
        is raised.
        """
        buffered_point = point.buffer(buffer_size)
        cities = cls.objects.filter(geog__bboverlaps=buffered_point)\
                .distance(point)\
                .order_by('distance')
        if cities.count() > 0:
            city = cities[0]
            logger.debug("Found geometry %s covering %s" % (
                    city,
                    point,
                )
            )
            return city
        else:
            buffer_size = buffer_size * 2
            stack_depth = stack_depth + 1
            if stack_depth <= stack_depth_maximum:
                logger.debug("Recursively calling with buffer: %s (stack depth: %s)" % (
                        buffer_size,
                        stack_depth
                    )
                )
                return cls.get_nearest_to(point, stack_depth_maximum, stack_depth, buffer_size)
            else:
                # BUG FIX: this previously interpolated the name `buffer`
                # (the Python 2 builtin type; a NameError on Python 3)
                # instead of the local `buffer_size`.
                logger.debug(
                    "No geometry found; stack depth maximum encountered "
                    "at buffer of size %s" % buffer_size
                )
                raise cls.DoesNotExist(
                    "No cities were found within the range you specified; "
                    "try increasing your initial buffer_size from %s or "
                    "your stack_depth_maximum from %s." % (
                        buffer_size,
                        stack_depth_maximum
                    )
                )

    def __unicode__(self):
        return "%s, %s" % (self.name, self.get_state_display())

    class Meta:
        ordering = ['name', ]
        verbose_name_plural = "Place Boundaries"
        verbose_name = "Place Boundary"
        # app_label = 'census places'
        # db_table = 'census_places_placeboundary'
class ZIPBoundary(models.Model):
    """Census ZIP Code Tabulation Area (ZCTA) boundary polygon.

    Field names ending in `10` come from the 2010 TIGER/Line shapefile
    attribute names (classfp10, mtfcc10, ...) -- presumably loaded
    verbatim from that source; confirm against the import script.
    """
    geo_id = models.CharField(
        primary_key=True,
        max_length=60)
    state = models.CharField(
        max_length=2,
        db_index=True,
        choices=STATES)
    zip_code = models.CharField(
        max_length=10,
        unique=True,
        db_index=True)
    classfp10 = models.CharField(
        max_length=10,
        db_index=True)
    mtfcc10 = models.CharField(
        max_length=10,
        db_index=True)
    funcstat10 = models.CharField(
        max_length=10,
        db_index=True)
    aland10 = models.PositiveIntegerField(
        verbose_name='land area')
    awater10 = models.PositiveIntegerField(
        verbose_name='water area')
    # Centroid coordinates stored as decimals (not a geometry field).
    lat = models.DecimalField(
        max_digits=15,
        decimal_places=10,
        verbose_name='latitude')
    lng = models.DecimalField(
        max_digits=15,
        decimal_places=10,
        verbose_name='longitude')
    partflg10 = models.CharField(
        max_length=10,
        db_index=True)
    geog = models.MultiPolygonField(
        geography=True,
        spatial_index=False#True # Breaks MySQL with InnoDB.
    )
    objects = models.GeoManager()
    def __unicode__(self):
        return "%s" % (self.zip_code,)
    class Meta:
        ordering = ['zip_code', ]
        verbose_name_plural = "ZIP Boundaries"
        verbose_name = "ZIP Boundary"
        # app_label = 'census places'
        # db_table = 'census_places_zipboundary'
| {
"content_hash": "f4fa83e4cc39b006cabed7bbd16f0f28",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 95,
"avg_line_length": 27.835365853658537,
"alnum_prop": 0.5235487404162102,
"repo_name": "coddingtonbear/django-census-places",
"id": "a82cde3e51d85340712fda9b23e554505087bbb4",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "census_places/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22391"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata for the PttWebCrawler distribution.
# NOTE(review): author/author_email/download_url/keywords/classifiers are
# intentionally left empty in this fork -- confirm before publishing.
setup(
    name = 'PttWebCrawler',
    packages = ['PttWebCrawler'],
    version = '1.4',
    description = 'ptt web crawler',
    author = '',
    author_email = '',
    url = 'https://github.com/jwlin/ptt-web-crawler',
    download_url = '',
    keywords = [],
    classifiers = [],
    license='MIT',
    install_requires=[
        'argparse',
        'beautifulsoup4',
        'requests',
        'six',
        'pyOpenSSL'
    ],
    # Installs a `PttWebCrawler` console command that calls
    # PttWebCrawler/__main__.py:main().
    entry_points={
        'console_scripts': [
            'PttWebCrawler = PttWebCrawler.__main__:main'
        ]
    },
    zip_safe=True
)
| {
"content_hash": "eff253f8183420a6ca55336ebe035a26",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 57,
"avg_line_length": 21.741935483870968,
"alnum_prop": 0.5459940652818991,
"repo_name": "jikotw74/StockBin",
"id": "e18d641767c3ec98aa3a25ec15c7b5e5c5712c89",
"size": "674",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ptt-web-crawler-master/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14583"
},
{
"name": "HTML",
"bytes": "1611"
},
{
"name": "JavaScript",
"bytes": "105240"
},
{
"name": "Python",
"bytes": "13103"
}
],
"symlink_target": ""
} |
import math
from ummalqura_arrray import UmalqurraArray
'''
This class is responsoble to convert from Hijri to Gregorian or from Gregorian to Hijri
The algrothem was converted from java script to python by Khalid Al-hussayen in 1436-3-14 2015-1-5
The orjinal source developed by Suhail Alkowaileet the source url https://github.com/xsoh/Hijri.js/blob/master/Hijri.js
'''
class Umalqurra:
    """Convert dates between the Umm al-Qura (Hijri) and Gregorian calendars.

    Based on R.H. van Gent's Umm al-Qura algorithm; lunation start dates
    are tabulated in UmalqurraArray.
    """

    def gegorean_to_hijri(self, year, month, day):
        """Convert a Gregorian date to Hijri.

        Returns (hijri_year, hijri_month, hijri_day).
        NOTE: the method name keeps its historical misspelling
        ("gegorean") because callers depend on it.
        """
        day = int(day)
        m = int(month)
        y = int(year)
        # Regard March as the first month of the year so leap-day
        # corrections are simpler: January/February count as months
        # 13/14 of the previous year.
        if m < 3:
            y -= 1
            m += 12
        # Offset between the Julian and Gregorian calendars.
        a = math.floor(y / 100.)
        jgc = a - math.floor(a / 4.) - 2
        # Chronological Julian Day Number (CJDN).
        cjdn = math.floor(365.25 * (y + 4716)) + math.floor(30.6001 * (m + 1)) + day - jgc - 1524
        # Modified Chronological Julian Day Number (MCJDN).
        # (A dead recomputation of `a` from the original port was removed.)
        mcjdn = cjdn - 2400000
        # MCJDNs of the start of each lunation are tabulated in UmalqurraArray.
        index = UmalqurraArray.get_index(mcjdn)
        # Compute the Umm al-Qura calendar date from the lunation number.
        iln = index + 16260
        ii = math.floor((iln - 1) / 12)
        hijri_year = ii + 1
        hijri_month = iln - 12 * ii
        hijri_day = mcjdn - UmalqurraArray.ummalqura_dat[index - 1] + 1
        return hijri_year, hijri_month, hijri_day

    def hijri_to_gregorian(self, year, month, day):
        """Convert a Hijri date to Gregorian.

        Returns (gregorian_year, gregorian_month, gregorian_day).
        """
        iy = int(year)
        im = int(month)
        hijri_day = int(day)
        # Absolute lunation number since the epoch of the table.
        ii = iy - 1
        iln = (ii * 12) + 1 + (im - 1)
        i = iln - 16260
        mcjdn = hijri_day + UmalqurraArray.ummalqura_dat[i - 1] - 1
        cjdn = mcjdn + 2400000
        return self.julianToGregorian(cjdn)

    def julianToGregorian(self, julianDate):
        """Convert a Julian Day Number to a (year, month, day) Gregorian date.

        Source: http://keith-wood.name/calendars.html
        """
        z = math.floor(julianDate + 0.5)
        a = math.floor((z - 1867216.25) / 36524.25)
        a = z + 1 + a - math.floor(a / 4)
        b = a + 1524
        c = math.floor((b - 122.1) / 365.25)
        d = math.floor(365.25 * c)
        e = math.floor((b - d) / 30.6001)
        day = b - d - math.floor(e * 30.6001)
        # e encodes the month shifted so March = 3; unshift it.
        if e > 13.5:
            month = e - 13
        else:
            month = e - 1
        if month > 2.5:
            year = c - 4716
        else:
            year = c - 4715
        # There is no year zero in this scheme.
        if year <= 0:
            year -= 1
        return year, month, day
| {
"content_hash": "bf210861915845f518c8ac149b0ae57a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 143,
"avg_line_length": 39.71621621621622,
"alnum_prop": 0.5695814903028241,
"repo_name": "mpercich/Calendarize",
"id": "796d52d482201dc03caa30f99f5ad1738d315e0e",
"size": "2939",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ios/dateparser/lib/python2.7/site-packages/umalqurra/hijri.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "859680"
},
{
"name": "C++",
"bytes": "1621286"
},
{
"name": "M4",
"bytes": "1690"
},
{
"name": "Objective-C",
"bytes": "17561"
},
{
"name": "Objective-C++",
"bytes": "1764"
},
{
"name": "Python",
"bytes": "2941199"
},
{
"name": "Shell",
"bytes": "3252"
},
{
"name": "Swift",
"bytes": "5812"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, "tests")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
if __name__ == "__main__":
    from django.core.management import execute_from_command_line
    # Rewrite argv into `manage.py test filemanager [extra args...]` form.
    # NOTE: `args` aliases sys.argv, so these inserts mutate sys.argv too.
    args = sys.argv
    args.insert(1, "test")
    args.insert(2, "filemanager")
    execute_from_command_line(args)
| {
"content_hash": "cd50b705e80508b07874a4003b32d020",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.684375,
"repo_name": "IMGIITRoorkee/django-filemanager",
"id": "7e016b4bc7922685394611f2a0465c76581c6e94",
"size": "367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5572"
},
{
"name": "HTML",
"bytes": "7141"
},
{
"name": "JavaScript",
"bytes": "12364"
},
{
"name": "Python",
"bytes": "26683"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
import logging
import random # TODO: replace with safe random.
import pendulum
from synth.common.ordinal import as_ordinal
from synth.common.conftime import get_interval
from synth.devices.device import Device
from synth.devices.blb_helpers.solar_math import sun_bright
logger = logging.getLogger(__name__)
class Blb(Device):
    """ Battery (powered) light (measuring) button.

    Simulated device that periodically decays its battery, registers
    button presses, and measures ambient light, reporting each change
    to the client.
    """
    def __init__(self, conf, engine, client):
        """Create a BLB device and register its recurring simulation events.

        conf keys used: isDemoDevice, name, batteryLifeMu/batteryLifeSigma
        (or batteryLife), batteryAutoReplace, longitude, latitude.
        """
        # NOTE(review): super(Device, ...) skips Device.__init__ and calls
        # Device's base class instead -- looks deliberate, but confirm.
        super(Device, self).__init__()
        self.engine = engine
        self.client = client
        # generate identifiers
        self.id = "-".join([format(random.randrange(0, 255), '02x') for i in range(6)])  # i.e. a MAC address
        self.is_demo_device = conf.get('isDemoDevice', True)  # identifier for later deletion.
        self.label = conf.get('name', 'Thing ' + self.id)
        self.firmware = random.choice(["0.51","0.52","0.6","0.6","0.6","0.7","0.7","0.7","0.7"])
        self.factory_firmware = self.firmware
        self.operator = random.choice(["O2","O2","O2","EE","EE","EE","EE","EE"])
        # setup battery
        self.battery = 100
        if 'batteryLifeMu' in conf and 'batteryLifeSigma' in conf:
            battery_life_mu = get_interval(conf, 'batteryLifeMu', None).total_seconds()
            battery_life_sigma = get_interval(conf, 'batteryLifeSigma', None).total_seconds()
            battery_life_min = battery_life_mu - (2 * battery_life_sigma)
            battery_life_max = battery_life_mu + (2 * battery_life_sigma)
            battery_life = random.normalvariate(battery_life_mu, battery_life_sigma)
            # Clamp the sampled lifetime into [mu - 2*sigma, mu + 2*sigma].
            # BUG FIX: the original max(min(x, lo), hi) always returned the
            # upper bound; min(max(x, lo), hi) clamps correctly.
            # noinspection PyArgumentList
            self.battery_life = pendulum.interval(seconds=min(max(battery_life, battery_life_min), battery_life_max))
        else:
            # noinspection PyArgumentList
            self.battery_life = get_interval(conf, 'batteryLife', pendulum.interval(minutes=5))
        self.battery_auto_replace = conf.get('batteryAutoReplace', False)
        # Decay in 100 steps of 1% each across the battery's lifetime.
        self.engine.register_event_in(self.battery_decay, self.battery_life / 100, None, self)
        # setup button press counter
        self.button_press_count = 0
        self.engine.register_event_in(self.press_button, pendulum.interval(), None, self)
        # setup light measurement
        self.longitude = conf.get('longitude', 0)
        self.latitude = conf.get('latitude', 0)
        self.light = 0.0
        # noinspection PyArgumentList
        self.engine.register_event_in(self.measure_light, pendulum.interval(hours=12), None, self)
        self.client.add_device(self.id, engine.get_now(), {
            'battery': self.battery,
            'longitude': self.longitude,
            'latitude': self.latitude,
        })

    def press_button(self, time):
        """Record a button press (if the battery is alive) and reschedule."""
        if self.battery > 0:
            self.button_press_count += 1
            self.client.update_device(self.id, time, {'buttonPress': self.button_press_count})
        # noinspection PyArgumentList
        next_press_interval = pendulum.interval(hours=1)  # TODO: timewave?
        # timewave
        #     .next_usage_time
        #        synth.simulation.sim.get_time(),
        #       ["Mon", "Tue", "Wed", "Thu", "Fri"], "06:00-09:00"
        logger.info("{id}: Pressed button for the {nth} time.".format(
            id=self.id,
            nth=as_ordinal(self.button_press_count),
        ))
        # Note: reschedules even when the battery is flat.
        self.engine.register_event_in(self.press_button, next_press_interval, None, self)

    def battery_decay(self, time):
        """Drop the battery by 1%, auto-replacing at 0% if configured."""
        self.battery -= 1
        if self.battery <= 0 and self.battery_auto_replace:
            logger.info("{id}: Auto-replacing battery.".format(id=self.id))
            self.battery = 100
        logger.info("{id}: Battery decayed to {battery}".format(id=self.id, battery=self.battery))
        self.client.update_device(self.id, time, {'battery': self.battery})
        # Decay stops permanently once the battery is flat (no auto-replace).
        if self.battery > 0:
            self.engine.register_event_in(self.battery_decay, self.battery_life / 100, None, self)

    def measure_light(self, time):
        """Measure solar brightness at this device's location and reschedule."""
        if self.battery > 0:
            self.light = sun_bright(time.int_timestamp, (self.longitude, self.latitude))
            # sun_bright(synth.simulation.sim.get_time(),
            #            (float(Device.get_property(self, "longitude")),
            #             float(Device.get_property(self, "latitude")))
            #           ))
            self.client.update_device(self.id, time, {'light': self.light})
        # noinspection PyArgumentList
        self.engine.register_event_in(self.measure_light, pendulum.interval(hours=1), None, self)
| {
"content_hash": "189c725b3d36b0168b3c61080f332c85",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 117,
"avg_line_length": 44.90196078431372,
"alnum_prop": 0.614410480349345,
"repo_name": "DevicePilot/synth",
"id": "a1097b4a160f562b84147814aa4b199524a82759",
"size": "4580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synth/devices/unused/blb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "628644"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated validator for the string property
    # `funnel.marker.colorbar.ticksuffix`.
    def __init__(
        self, plotly_name="ticksuffix", parent_name="funnel.marker.colorbar", **kwargs
    ):
        super(TicksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type/role; these are the defaults.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
| {
"content_hash": "ad644217b951896caf84c40ac8706a20",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 34.642857142857146,
"alnum_prop": 0.6103092783505155,
"repo_name": "plotly/python-api",
"id": "bc1859cdc6fd7420eea5f994c9757ff99f552a4b",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/marker/colorbar/_ticksuffix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from Code.DataStructures.PrimeCurves import P192, P384
from Code.PerformanceComparison.joint_mul import JointMultiplicationScalarPerformanceTest
# Benchmark fixtures: JointMultiplicationScalarPerformanceTest(iterations,
# curve, [scalar ranges], window1, window2[, jacobi]).
# "Big" variants use scalars near the curve order; suffix _WW is the pair
# of window sizes used by the interleaving tests.
# --- P-192, affine coordinates ---
joint_mul_affine_192_33 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**5, 2**32], 3, 3)
joint_mul_affine_192Big_33 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 3, 3)
joint_mul_affine_192Big_66 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 6, 6)
# --- P-192, Jacobian coordinates ---
joint_mul_jacobi_192_33 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**5, 2**32], 3, 3, jacobi=True)
joint_mul_jacobi_192Big_33 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 3, 3, jacobi=True)
joint_mul_jacobi_192Big_34 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 3, 4, jacobi=True)
joint_mul_jacobi_192Big_44 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 4, 4, jacobi=True)
joint_mul_jacobi_192Big_45 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 4, 5, jacobi=True)
joint_mul_jacobi_192Big_55 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 5, 5, jacobi=True)
joint_mul_jacobi_192_66 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**5, 2**32], 6, 6, jacobi=True)
joint_mul_jacobi_192Big_66 = JointMultiplicationScalarPerformanceTest(1000, P192, [2**128, 2**192], 6, 6, jacobi=True)
# --- P-384, affine coordinates ---
joint_mul_affine_384_33 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**5, 2**32], 3, 3)
# NOTE(review): this "Big" fixture uses the 192-bit ranges [2**128, 2**192]
# unlike the other 384 Big fixtures ([2**330, 2**384]) and appears unused
# below -- presumably a copy-paste slip; confirm before relying on it.
joint_mul_affine_384Big_33 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**128, 2**192], 3, 3)
joint_mul_affine_384Big_66 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 6, 6)
# --- P-384, Jacobian coordinates ---
joint_mul_jacobi_384_33 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**5, 2**32], 3, 3, jacobi=True)
joint_mul_jacobi_384Big_33 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 3, 3, jacobi=True)
joint_mul_jacobi_384Big_34 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 3, 4, jacobi=True)
joint_mul_jacobi_384Big_44 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 4, 4, jacobi=True)
joint_mul_jacobi_384Big_45 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 4, 5, jacobi=True)
joint_mul_jacobi_384Big_55 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 5, 5, jacobi=True)
joint_mul_jacobi_384_66 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**5, 2**32], 6, 6, jacobi=True)
joint_mul_jacobi_384Big_66 = JointMultiplicationScalarPerformanceTest(1000, P384, [2**330, 2**384], 6, 6, jacobi=True)
# Run and print the P-192 benchmarks: brute force, JSF, and interleaving
# sliding-window joint multiplication, in affine and Jacobian coordinates.
print("*** 192 ***")
print("Brute")
print("Affine Brute")
print(joint_mul_affine_192_33.brute_force_test())
print(joint_mul_affine_192Big_66.brute_force_test())
print("Jacobi Brute")
print(joint_mul_jacobi_192_33.brute_force_test())
print(joint_mul_jacobi_192Big_33.brute_force_test())
print("JSF")
print("Affine JSF")
print(joint_mul_affine_192_33.JSF_mul_test())
print(joint_mul_affine_192Big_66.JSF_mul_test())
print("Jacobi JSF")
print(joint_mul_jacobi_192_33.JSF_mul_test())
print(joint_mul_jacobi_192Big_33.JSF_mul_test())
print("Interleaving")
print("Affine interleaving")
print(joint_mul_affine_192_33.interleaving_sliding_window_test())  # added
print(joint_mul_affine_192Big_66.interleaving_sliding_window_test())  # added
print("Jacobi Interleaving")
print(joint_mul_jacobi_192_66.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_66.interleaving_sliding_window_test())
print("DONE")
# Window-size sweep for the interleaving algorithm only.
print("Interleaving different windows test, must be ignored by averages for Big/Small, and algo comparison")
print(joint_mul_jacobi_192Big_33.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_34.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_44.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_45.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_55.interleaving_sliding_window_test())
print(joint_mul_jacobi_192Big_66.interleaving_sliding_window_test())
print("***")
# Same benchmark sequence as above, for the P-384 curve.
print("*** 384 ***")
print("Brute")
print("Affine Brute")
print(joint_mul_affine_384_33.brute_force_test())
print(joint_mul_affine_384Big_66.brute_force_test())
print("Jacobi Brute")
print(joint_mul_jacobi_384_33.brute_force_test())
print(joint_mul_jacobi_384Big_33.brute_force_test())
print("JSF")
print("Affine JSF")
print(joint_mul_affine_384_33.JSF_mul_test())
print(joint_mul_affine_384Big_66.JSF_mul_test())
print("Jacobi JSF")
print(joint_mul_jacobi_384_33.JSF_mul_test())
print(joint_mul_jacobi_384Big_33.JSF_mul_test())
print("Interleaving")
print("Affine interleaving")
print(joint_mul_affine_384_33.interleaving_sliding_window_test())  # added
print(joint_mul_affine_384Big_66.interleaving_sliding_window_test())
print("Jacobi Interleaving")
print(joint_mul_jacobi_384_66.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_66.interleaving_sliding_window_test())
print("DONE")
# Window-size sweep for the interleaving algorithm only.
print("Interleaving different windows test, must be ignored by averages for Big/Small, and algo comparison")
print(joint_mul_jacobi_384Big_33.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_34.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_44.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_45.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_55.interleaving_sliding_window_test())
print(joint_mul_jacobi_384Big_66.interleaving_sliding_window_test())
print("***")
| {
"content_hash": "01bd238752f0218558b4e8856de19582",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 118,
"avg_line_length": 57.16494845360825,
"alnum_prop": 0.7655545536519387,
"repo_name": "dev-alberto/Bachelor2017",
"id": "c0cd8c64e2d9134e887ea9bb86244d54929a0b81",
"size": "5545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/PerformanceComparison/TestScripts/joint_scalar_mul_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4806"
},
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "80810"
},
{
"name": "TeX",
"bytes": "108114"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def hive_service(
    name,
    action='start'):
  """Start or stop the Hive Metastore or HiveServer2 daemon.

  :param name: 'metastore' or 'hiveserver2' -- selects pid file and
               launch command.
  :param action: 'start' or 'stop'.
  """
  import params

  # Build the daemon launch command and pid-file path for the component.
  if name == 'metastore':
    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
    cmd = format(
      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}")
  elif name == 'hiveserver2':
    pid_file = format("{hive_pid_dir}/{hive_pid}")
    cmd = format(
      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir}")

  if action == 'start':
    demon_cmd = format("{cmd}")
    # Skip the start if a live process already owns the pid file.
    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
    Execute(demon_cmd,
            user=params.hive_user,
            not_if=no_op_test
    )

    # Verify connectivity to the external metastore database (MySQL/Oracle).
    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
      db_connection_check_command = format(
        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
      Execute(db_connection_check_command,
              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
  elif action == 'stop':
    # Kill the daemon recorded in the pid file, then remove the pid file.
    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
    Execute(demon_cmd)
| {
"content_hash": "5b8108018fed12dbfcda410379ae0733",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 249,
"avg_line_length": 42,
"alnum_prop": 0.6943722943722944,
"repo_name": "arenadata/ambari",
"id": "b4afb66388f7f7637906616aa267109ea79fad5b",
"size": "2332",
"binary": false,
"copies": "5",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/package/scripts/hive_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import abc
from loggers import Actions
class BaseLogger(object):
    """
    An abstract logger class. Contains the skeleton code and abstract methods to implement a full logger.
    Inherit from this class to create a different logger.
    """
    def __init__(self, output_controller, search_context):
        self._output_controller = output_controller
        self._search_context = search_context
        # Set once the query list is exhausted; checked by is_finished().
        self._queries_exhausted = False

    def log_action(self, action_name, **kwargs):
        """
        A nice helper method which is publicly exposed for logging an event.
        Import loggers.Actions to use the appropriate event type when determining the action.
        Use additional keywords to provide additional arguments to the logger.
        """
        action_mapping = {
            Actions.QUERY:   self._log_query,
            Actions.SERP:    self._log_serp,
            Actions.SNIPPET: self._log_snippet,
            Actions.DOC:     self._log_assess,
            Actions.MARK:    self._log_mark_document,
        }
        # BUG FIX: the original indexed the dict directly, so an unknown
        # action raised KeyError instead of reaching the fallback branch.
        handler = action_mapping.get(action_name)
        if handler:
            handler(**kwargs)
        else:
            self.__log_unknown_action(action_name)

    @abc.abstractmethod
    def get_last_query_time(self):
        return 0

    @abc.abstractmethod
    def get_last_interaction_time(self):
        return 0

    @abc.abstractmethod
    def get_last_marked_time(self):
        return 0

    @abc.abstractmethod
    def get_last_relevant_snippet_time(self):
        return 0

    @abc.abstractmethod
    def get_progress(self):
        """
        Abstract method. Returns a value between 0 and 1 representing the progress of the simulation.
        0 represents the start of the simulation, and 1 represents total completion (e.g. the user's time limit has elapsed.)
        If the progress of the simulation cannot be determined, return None.
        """
        return None

    @abc.abstractmethod
    def is_finished(self):
        """
        Abstract method, only returns indication as to whether the list of queries has been exhausted.
        Extend this method to include additional checks to see if the user has reached the limit to what they can do.
        Depending on the implemented logger, this could be the number of queries issued, a time limit, etc...
        """
        return self._queries_exhausted

    def queries_exhausted(self):
        """
        This method is called when the list of queries to be issued has been exhausted.
        Sets an internal flag within the Logger, meaning that the next call to .is_finished() will stop the process.
        """
        self._queries_exhausted = True

    def _report(self, action, **kwargs):
        """
        A simple method to report the current action being logged.
        Extend this method and call the parent implementation (via super()) to include additional details.
        """
        return "ACTION {0} ".format(action)

    @abc.abstractmethod
    def _log_query(self, **kwargs):
        """
        Abstract method. When inheriting from this class, implement this method to appropriately handle a query event.
        Returns None.
        """
        pass

    @abc.abstractmethod
    def _log_serp(self, **kwargs):
        """
        Abstract method. When inheriting from this class, implement this method to appropriately handle a SERP examination.
        Returns None.
        """
        pass

    @abc.abstractmethod
    def _log_snippet(self, **kwargs):
        """
        Abstract method. When inheriting from this class, implement this method to appropriately handle the examination of a snippet.
        Returns None.
        """
        pass

    @abc.abstractmethod
    def _log_assess(self, **kwargs):
        """
        Abstract method. When inheriting from this class, implement this method to appropriately handle assessing a document.
        Returns None.
        """
        pass

    @abc.abstractmethod
    def _log_mark_document(self, **kwargs):
        """
        Abstract method. When inheriting from this class, implement this method to appropriately handle the costs of marking a document.
        Returns None.
        """
        pass

    def __log_unknown_action(self, action_name):
        # BUG FIX: the original signature took no action_name, so the call
        # in log_action raised TypeError.  The argument is accepted (and
        # currently unused) to match the call site.
        self._report('UNKNOWN ACTION')
"content_hash": "ce7ea90e99b122a1928cfcda2dc6dbfd",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 136,
"avg_line_length": 35.68032786885246,
"alnum_prop": 0.6283023202389157,
"repo_name": "leifos/simiir",
"id": "ef2ef87d0b868af7756d3051688a7eeb59973355",
"size": "4353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simiir/loggers/base_logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Click",
"bytes": "46035088"
},
{
"name": "Python",
"bytes": "276054"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from dnrm.openstack.common import context as req_context
from dnrm.openstack.common.gettextutils import _ # noqa
from dnrm.openstack.common import log as logging
from dnrm.openstack.common import rpc
# Module-level logger used to report notification delivery failures.
LOG = logging.getLogger(__name__)
# Configurable list of base AMQP topics; notify() appends a
# ".<priority>" suffix to each before publishing.
notification_topic_opt = cfg.ListOpt(
    'notification_topics', default=['notifications', ],
    help='AMQP topic used for OpenStack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
    """Send a notification message via RPC to every configured topic.

    :param context: request context; an admin context is created when
                    falsy.
    :param message: notification payload dict; its optional 'priority'
                    key selects the "<topic>.<priority>" routing suffix.

    Delivery failures are logged and swallowed per topic, so one broken
    topic does not prevent publication to the remaining topics.
    """
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            # Pass the substitutions explicitly instead of locals():
            # locals() hands the formatter the entire local namespace and
            # silently depends on variable names staying unchanged.
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"),
                          {'topic': topic, 'message': message})
| {
"content_hash": "6b6bc98a0363e2d3392e7fc6411bf733",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 34.096774193548384,
"alnum_prop": 0.6594134342478714,
"repo_name": "Brocade-OpenSource/OpenStack-DNRM",
"id": "494c799472b6affed7ce6545e3f826b5ebfe9574",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnrm/openstack/common/notifier/rpc_notifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "603854"
},
{
"name": "Shell",
"bytes": "6704"
}
],
"symlink_target": ""
} |
import sqlite3
import Course
import Student
class SQLWrapper:
    """sqlite3-backed persistence layer for students and courses.

    All statements use parameterized queries (``?`` placeholders) so that
    caller-supplied values -- ids, passwords, course names, repr'd course
    lists -- can never be interpreted as SQL (the previous %-interpolation
    was injectable and broke on embedded quotes).
    """

    def __init__(self):
        '''Open (or create) the local timetable database and keep a cursor.'''
        self.con = sqlite3.connect('timetable.db')
        self.cursor = self.con.cursor()

    def createStudentsTable(self):
        """
        Creates a table to store student data (no-op if it already exists).
        """
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS Students "
            "(studentId INTEGER, grade INTEGER, selectedCourses TEXT, "
            "password TEXT, finalCourses TEXT)")
        self.con.commit()

    def createCoursesTable(self):
        """
        Creates a courses table (no-op if it already exists).
        """
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS Courses "
            "(courseId INTEGER, name TEXT, priority INTEGER)")
        self.con.commit()

    def generateCourses(self):
        """
        Generates the default course catalogue in the courses table.
        Does nothing when courses already exist, so it is safe to call
        on every start-up.
        """
        if len(self.getAllCourses()) > 0:
            return
        names = ["English", "Math", "Comp Sci", "Entrepreneurship",
                 "Accounting", "History", "Geography", "Music",
                 "Religion"]
        # Priorities parallel the names: the first two courses get
        # priority 3, the next five priority 2, the last two priority 1
        # (the same layout the original three index loops produced).
        priorities = [3, 3, 2, 2, 2, 2, 2, 1, 1]
        for courseId, (name, priority) in enumerate(zip(names, priorities),
                                                    start=1):
            self.addCourse(courseId, name, priority)

    def addStudent(self, studentId, grade, password):
        """ (int, int, str) -> (none)
        Adds a student into the database with no selected/final courses yet.
        """
        # repr(None) keeps the textual 'None' sentinel that the original
        # string formatting stored in these TEXT columns, so existing
        # readers of selectedCourses/finalCourses keep working.
        self.cursor.execute(
            "INSERT INTO Students VALUES (?, ?, ?, ?, ?)",
            (studentId, grade, repr(None), password, repr(None)))
        self.con.commit()

    def addStudentCourses(self, studentId, selectedCourses):
        """ (int, list) -> (none)
        Adds all courses that the Student chose.
        """
        # The list is stored as its repr string, matching the schema's
        # TEXT column and the parse convention used elsewhere.
        self.cursor.execute(
            "UPDATE Students SET selectedCourses = ? WHERE studentId = ?",
            (repr(selectedCourses), studentId))
        self.con.commit()

    def addFinalCourses(self, studentId, finalCourses):
        """ (int, list) -> (none)
        Adds final timetable to Student (stored as a repr string).
        """
        self.cursor.execute(
            "UPDATE Students SET finalCourses = ? WHERE studentId = ?",
            (repr(finalCourses), studentId))
        self.con.commit()

    def addCourse(self, courseId, name, priority):
        """ (int, str, int) -> (none)
        Adds a course row to the Courses table.
        """
        self.cursor.execute(
            "INSERT INTO Courses VALUES (?, ?, ?)",
            (courseId, name, priority))
        self.con.commit()

    def deleteStudent(self, studentId):
        """ (int) -> (none)
        Deletes the student with the given id.
        """
        self.cursor.execute(
            "DELETE FROM Students WHERE studentId = ?", (studentId,))
        self.con.commit()

    def getCourse(self, courseId):
        """ (int) -> (Course)
        Gets the course with the given id, or None if it does not exist.
        """
        self.cursor.execute(
            "SELECT * FROM Courses WHERE courseId = ?", (courseId,))
        row = self.cursor.fetchone()
        return self.parseCourse(row) if row is not None else None

    def getAllCourses(self):
        """
        Gets all courses as a list of Course objects ([] when none exist).
        """
        self.cursor.execute("SELECT * FROM Courses")
        # fetchall() always returns a list, so no None check is needed.
        return [self.parseCourse(row) for row in self.cursor.fetchall()]

    def getStudent(self, studentId):
        """ (int) -> (Student)
        Gets the student with the given id, or None if it does not exist.
        """
        self.cursor.execute(
            "SELECT * FROM Students WHERE studentId = ?", (studentId,))
        row = self.cursor.fetchone()
        return self.parseStudent(row) if row is not None else None

    def getAllStudents(self):
        """
        Gets all students as a list of Student objects ([] when none exist).
        """
        self.cursor.execute("SELECT * FROM Students")
        return [self.parseStudent(row) for row in self.cursor.fetchall()]

    def parseStudent(self, row):
        # Convert a Students row tuple into a Student model object.
        return Student.Student(row[0], row[1], row[2], row[3], row[4])

    def parseCourse(self, row):
        # Convert a Courses row tuple into a Course model object.
        return Course.Course(row[0], row[1], row[2])
| {
"content_hash": "b9796e1f96a18221b2d4588a9cd826f2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 148,
"avg_line_length": 32.30061349693251,
"alnum_prop": 0.576068376068376,
"repo_name": "S-Kantor/Time-table-creator",
"id": "bbafa31bd51ecfafac9051e0230f9015b0f17d74",
"size": "5414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SQLWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33702"
}
],
"symlink_target": ""
} |
import pytest
# Opt the shared ``webargs.testing`` helpers into pytest's assertion
# rewriting, so plain ``assert`` statements in that module report full
# introspection details on failure (normally only test modules get this).
pytest.register_assert_rewrite("webargs.testing")
| {
"content_hash": "4ce23df2bd5ec36aa5e498590560dd64",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 49,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.8153846153846154,
"repo_name": "sloria/webargs",
"id": "ad2e3ad0758b96b25bd669d36e9eeb2f333fee03",
"size": "65",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182724"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.