repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
kmike/tornado-slacker | slacker/__init__.py | Python | mit | 37 | 0 | fro | m slacker.postpone impo | rt Slacker
|
InterSIS/django-rest-serializer-field-permissions | rest_framework_serializer_field_permissions/permissions.py | Python | gpl-3.0 | 1,438 | 0.002782 | """
Permissions to use with the rest_framework_serializer_field_permissions.field classes:
class PersonSerializer(FieldPermissionSerializerMixin, serializers.ModelSerializer):
family_names = fields.CharField(permission_classes=(IsAuthenticated(), ))
given_names = fields.CharField(permission_clas | ses=(IsAuthenticated(), ))
nick_name = fields.CharField(permission_classes=(AllowAny(), ))
"""
class BaseFieldPermission(object):
"""
The permission from which all other field-permissions inherit.
Create your own field-permissions by extending this object and overriding
has_permission.
"""
# pylint: disable=no-self-use
def has_ | permission(self, request):
"""
Return true if permission is granted, return false if permission is
denied.
"""
return True
class AllowAny(BaseFieldPermission):
"""
Permission which allows free-access to the given field.
"""
def has_permission(self, request):
return True
class AllowNone(BaseFieldPermission):
"""
Permission which allows no access to the given field.
"""
def has_permission(self, request):
return False
class IsAuthenticated(BaseFieldPermission):
"""
Permission which only allows authenticated users access to the field.
"""
def has_permission(self, request):
return request.user and request.user.is_authenticated
|
eeue56/just-columns | just-columns/test.py | Python | bsd-3-clause | 447 | 0.008949 | from wrapper imp | ort get, run
import logging
import requests
@get('/')
def f(*args, **kwargs):
return '<html><head></head><body><h1>Hello!</h1></body></html>'
@get('/test', ['php'])
def test_f(*args, **kwargs):
arguments = kwargs['arguments']
php = arguments['php'][0]
self = args[0]
self.write("Head")
return 'Test{}'.format(php)
def test():
run(8888)
def main():
pass
if __name__ = | = '__main__':
test() |
cmjatai/cmj | cmj/core/migrations/0009_auto_20180220_0941.py | Python | gpl-3.0 | 578 | 0.001757 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-20 12:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_notificacao_user_origin'),
]
operations = [
| migrations.AlterModelOptions(
| name='notificacao',
options={'permissions': (('popup_notificacao', 'Visualização das notificações em Popup no Avatar do Usuário'),), 'verbose_name': 'Notificação', 'verbose_name_plural': 'Notificações'},
),
]
|
django-leonardo/django-leonardo | leonardo/fields/__init__.py | Python | bsd-3-clause | 209 | 0 |
import warnings
from leonardo.forms.fields import *
warnings. | warn('leonardo.field | s is obsolete'
' location use leonardo.forms instead'
'This location is only in migrations now')
|
joshainglis/python-soundscape | soundscape.py | Python | mit | 14,885 | 0.003225 | # Python 3 program for soundscape generation. (C) P.B.L. Meijer 2015
# Direct port of the hificode.c C program
# Last update: October 6, 2015; released under the Creative
# Commons Attribution 4.0 International License (CC BY 4.0),
# see http://www.seeingwithsound.com/im2sound.htm for details
#
# Beware that this program runs excruciati | ngly slowly under Python,
# while the PyPy python JIT compiler does not (yet) support OpenCV
import math
import os
import struct
import sys
import wave
import cv2 as cv
| import numpy as np
file_name = 'hificode.wav' # User-defined parameters
min_frequency = 500 # Lowest frequency (Hz) in soundscape
max_frequency = 5000 # Highest frequency (Hz)
sample_frequency = 44100 # Sample frequency (Hz)
image_to_sound_conversion_time = 1.05 # Image to sound conversion time (s)
use_exponential = False # Linear|Exponential=0|1 distribution
hifi = 1 # 8-bit|16-bit=0|1 sound quality
stereo = 1 # Mono|Stereo=0|1 sound selection
delay = 1 # Nodelay|Delay=0|1 model (stereo=1)
relative_fade = 1 # Relative fade No|Yes=0|1 (stereo=1)
diffraction = 1 # Diffraction No|Yes=0|1 (stereo=1)
use_b_spline = 1 # Rectangular|B-spline=0|1 time window
gray_levels = 0 # 16|2-level=0|1 gray format in P[][]
use_camera = 1 # Use OpenCV camera input No|Yes=0|1
use_screen = 1 # Screen view for debugging No|Yes=0|1
class Soundscape(object):
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
WHITE = 1.00
BLACK = 0.00
def __init__(self, file_name='hificode.wav', min_frequency=500, max_frequency=5000, sample_frequency=44100,
image_to_sound_conversion_time=1.05, is_exponential=False, hifi=True, stereo=True, delay=True,
relative_fade=True, diffraction=True, use_b_spline=True, gray_levels=16, use_camera=True,
use_screen=True):
"""
:param file_name:
:type file_name: str
:param min_frequency:
:type min_frequency: int
:param max_frequency:
:type max_frequency: int
:param sample_frequency:
:type sample_frequency: int
:param image_to_sound_conversion_time:
:type image_to_sound_conversion_time: float
:param is_exponential:
:type is_exponential: bool
:param hifi:
:type hifi: bool
:param stereo:
:type stereo: bool
:param delay:
:type delay: bool
:param relative_fade:
:type relative_fade: bool
:param diffraction:
:type diffraction: bool
:param use_b_spline:
:type use_b_spline: bool
:param gray_levels:
:type gray_levels: int
:param use_camera:
:type use_camera: bool
:param use_screen:
:type use_screen: bool
:return:
:rtype:
"""
self.file_name = file_name
self.min_frequency = min_frequency
self.max_frequency = max_frequency
self.sample_frequency = sample_frequency
self.image_to_sound_conversion_time = image_to_sound_conversion_time
self.is_exponential = is_exponential
self.hifi = hifi
self.stereo = stereo
self.delay = delay
self.relative_fade = relative_fade
self.diffraction = diffraction
self.use_b_spline = use_b_spline
self.gray_levels = gray_levels
self.use_camera = use_camera
self.use_screen = use_screen
self.hist = (1 + self.hifi) * (1 + self.stereo)
if use_camera:
self.num_columns = 176
self.num_rows = 64
else:
self.num_columns = 64
self.num_rows = 64
self.k = 0
self.b = 0
self.num_frames = 2 * int(0.5 * self.sample_frequency * self.image_to_sound_conversion_time)
self.frames_per_column = int(self.num_frames / self.num_columns)
self.sso = 0 if self.hifi else 128
self.ssm = 32768 if self.hifi else 128
self.scale = 0.5 / math.sqrt(self.num_rows)
self.dt = 1.0 / self.sample_frequency
self.v = 340.0 # v = speed of sound (m/s)
self.hs = 0.20 # hs = characteristic acoustical size of head (m)
self.w = np.arange(self.num_rows, dtype=np.float)
self.phi0 = np.zeros(self.num_rows, dtype=np.float)
self.A = np.zeros((self.num_columns, self.num_rows), dtype=np.uint8)
# Coefficients used in rnd()
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
HIST = (1 + hifi) * (1 + stereo)
WHITE = 1.00
BLACK = 0.00
if use_camera:
num_columns = 176
num_rows = 64
else:
num_columns = 64
num_rows = 64
# if gray_levels:
# else:
try:
# noinspection PyUnresolvedReferences
import winsound
except ImportError:
def playsound(frequency, duration):
# sudo dnf -y install beep
os.system('beep -f %s -l %s' % (frequency, duration))
else:
def playsound(frequency, duration):
winsound.Beep(frequency, duration)
# def playSound(file):
# if sys.platform == "win32":
# winsound.PlaySound(file, winsound.SND_FILENAME) # Windows only
# # os.system('start %s' %file) # Windows only
# elif sys.platform.startswith('linux'):
# print("No audio player called for Linux")
# else:
# print("No audio player called for your platform")
def wi(file_object, i):
b0 = int(i % 256)
b1 = int((i - b0) / 256)
file_object.write(struct.pack('B', b0 & 0xff))
file_object.write(struct.pack('B', b1 & 0xff))
def wl(fp, l):
i0 = l % 65536
i1 = (l - i0) / 65536
wi(fp, i0)
wi(fp, i1)
def rnd():
global IR, IA, IC, IM
IR = (IR * IA + IC) % IM
return IR / (1.0 * IM)
def main():
current_frame = 0
b = 0
num_frames = 2 * int(0.5 * sample_frequency * image_to_sound_conversion_time)
frames_per_column = int(num_frames / num_columns)
sso = 0 if hifi else 128
ssm = 32768 if hifi else 128
scale = 0.5 / math.sqrt(num_rows)
dt = 1.0 / sample_frequency
v = 340.0 # v = speed of sound (m/s)
hs = 0.20 # hs = characteristic acoustical size of head (m)
w = np.arange(num_rows, dtype=np.float)
phi0 = np.zeros(num_rows)
A = np.zeros((num_columns, num_rows), dtype=np.uint8)
# w = [0 for i in range(num_rows)]
# phi0 = [0 for i in range(num_rows)]
# A = [[0 for j in range(num_columns)] for i in range(num_rows)] # num_rows x num_columns pixel matrix
# Set lin|exp (0|1) frequency distribution and random initial phase
freq_ratio = max_frequency / float(min_frequency)
if use_exponential:
w = TwoPi * min_frequency * np.power(freq_ratio, w / (num_rows - 1))
for i in range(0, num_rows):
w[i] = TwoPi * min_frequency * pow(freq_ratio, 1.0 * i / (num_rows - 1))
else:
for i in range(0, num_rows):
w[i] = TwoPi * min_frequency + TwoPi * (max_frequency - min_frequency) * i / (
num_rows - 1)
for i in range(0, num_rows): phi0[i] = TwoPi * rnd()
cam_id = 0 # First available OpenCV camera
# Optionally override ID from command line parameter: python hificode_OpenCV.py cam_id
if len(sys.argv) > 1:
cam_id = int(sys.argv[1])
try:
# noinspection PyArgumentList
cap = cv.VideoCapture(cam_id)
if not cap.isOpened():
raise ValueError('camera ID')
except ValueError:
print("Could not open camera", cam_id)
raise
# Setting standard capture size, may fail; resize later
cap.read() # Dummy read needed with some devices
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_WIDTH, 176)
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 144)
if use_screen: # Screen views only for debugging
cv.namedWindow('Large', cv.WINDOW_AUTOSIZE)
cv.namedWindow('Small', cv.WINDOW_AUTOSIZE)
key = 0
while key != 27: # Escape key
ret, frame = cap.read()
if not ret:
# Sometimes initial frames fail
print("Capture failed\n")
key = cv.waitKey(100)
|
tombstone/models | research/object_detection/meta_architectures/context_rcnn_lib.py | Python | apache-2.0 | 8,672 | 0.005996 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library functions for ContextRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# The negative value used in padding the invalid weights.
_NEGATIVE_PADDING_VALUE = -100000
def filter_weight_value(weights, values, valid_mask):
"""Filters weights and values based on valid_mask.
_NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to
avoid their contribution in softmax. 0 will be set for the invalid elements in
the values.
Args:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means
valid and False means invalid.
Returns:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
Raises:
ValueError: If shape of doesn't match.
"""
w_batch_size, _, w_context_size = weights.shape
v_batch_size, v_context_size, _ = values.shape
m_batch_size, m_context_size = valid_mask.shape
if w_batch_size != v_batch_size or v_batch_size != m_batch_size:
raise ValueError("Please make sure the first dimension of the input"
" tensors are the same.")
if w_context_size != v_context_size:
raise ValueError("Please make sure the third dimension of weights matches"
" the second dimension of values.")
if w_context_size != m_context_size:
raise ValueError("Please make sure the third dimension of the weights"
" matches the second dimension of the valid_mask.")
valid_mask = valid_mask[..., tf.newaxis]
# Force the invalid weights to be very negative so it won't contribute to
# the softmax.
weights += tf.transpose(
tf.cast(tf.math.logical_not(valid_mask), weights.dtype) *
_NEGATIVE_PADDING_VALUE,
perm=[0, 2, 1])
# Force the invalid values to be 0.
values *= tf.cast(valid_mask, values.dtype)
return weights, values
def compute_valid_mask(num_valid_elements, num_elements):
"""Computes mask of valid entries within padded context feature.
Args:
num_valid_elements: A int32 Tensor of shape [batch_size].
num_elements: An int32 Tensor.
Returns:
A boolean Tensor of the shape [batch_size, num_elements]. True means
valid and False means invalid.
"""
batch_size = num_valid_elements.shape[0]
element_idxs = tf.range(num_elements, dtype=tf.int32)
batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
num_valid_elements = num_valid_elements[..., tf.newaxis]
valid_mask = tf.less(batch_element_idxs, num_valid_elements)
return valid_mask
def project_features(features, projection_dimension, is_training, normalize):
"""Projects features to another feature space.
Args:
features: A float Tensor of shape [batch_size, features_size,
num_features].
projection_dimension: A int32 Tensor.
is_training: A boolean Tensor (affecting batch normalization).
normalize: A boolean Tensor. If true, the output features will be l2
normalized on the last dimension.
Returns:
A float Tensor of shape [batch, features_size, projection_dimension].
"""
# TODO(guanhangwu) Figure out a better way of specifying the batch norm
# params.
batch_norm_params = {
"is_training": is_training,
"decay": 0.97,
"epsilon": 0.001,
"center": True,
"scale": True
}
batch_size, _, num_features = features.shape
features = tf.reshape(features, [-1, num_features])
projected_features = slim.fully_connected(
features,
num_outputs=projection_dimension,
activation_fn=tf.nn.relu6,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params)
projected_features = tf. | reshape(projected_features,
[batch_size, -1, projection_dimension])
if normalize:
projected_features = tf.math.l2_normalize(projected_features, axis=-1)
return projected_features
def attent | ion_block(input_features, context_features, bottleneck_dimension,
output_dimension, attention_temperature, valid_mask,
is_training):
"""Generic attention block.
Args:
input_features: A float Tensor of shape [batch_size, input_size,
num_input_features].
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
output_dimension: A int32 Tensor representing the last dimension of the
output feature.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
valid_mask: A boolean Tensor of shape [batch_size, context_size].
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, input_size, output_dimension].
"""
with tf.variable_scope("AttentionBlock"):
queries = project_features(
input_features, bottleneck_dimension, is_training, normalize=True)
keys = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
values = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
weights = tf.matmul(queries, keys, transpose_b=True)
weights, values = filter_weight_value(weights, values, valid_mask)
weights = tf.nn.softmax(weights / attention_temperature)
features = tf.matmul(weights, values)
output_features = project_features(
features, output_dimension, is_training, normalize=False)
return output_features
def compute_box_context_attention(box_features, context_features,
valid_context_size, bottleneck_dimension,
attention_temperature, is_training):
"""Computes the attention feature from the context given a batch of box.
Args:
box_features: A float Tensor of shape [batch_size, max_num_proposals,
height, width, channels]. It is pooled features from first stage
proposals.
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
valid_context_size: A int32 Tensor of shape [batch_size].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels].
"""
_, context_size, _ = context_features.shape
valid_mask = compute_valid_mask(valid_context_size, context_size)
channels = box_features.shape[-1]
# Average pools over height and width dimension so that the shape of
# box_features becomes [batch_size, max_num_proposals, cha |
kingsdigitallab/pbw-django | pbw/models.py | Python | gpl-2.0 | 46,771 | 0.001048 | import os
from django.core import serializers
from django.db import models
from django.utils.functional import cached_property
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from .settings import DISPLAYED_FACTOID_TYPES, BASE_DIR
class Accuracy(models.Model):
# Field name made lowercase.
acckey = models.AutoField(db_column="accKey", primary_key=True)
# Field name made lowercase.
accuracyname = models.CharField(db_column="accuracyName", max_length=100)
class Meta:
app_label = "pbw"
db_table = "Accuracy"
class Activityfactoid(models.Model):
# Field name made lowercase.
factoidkey = models.IntegerField(db_column="factoidKey", primary_key=True)
# Field name made lowercase.
sourcedate = models.TextField(db_column="sourceDate", blank=True, null=True)
# Field name made lowercase.
olangkey = models.IntegerField(db_column="oLangKey")
# Field name made lowercase.
sourcedateol = models.TextField(db_column="SourceDateOL", blank=True, null=True)
tstanp = models.DateTimeField()
class Meta:
db_table = "ActivityFactoid"
class Attrdatetype(models.Model):
# Field name made lowercase.
attrdtkey = models.IntegerField(db_column="attrDTKey", primary_key=True)
# Field name made lowercase.
adtname = models.CharField(db_column="aDTName", max_length=20)
class Meta:
db_table = "AttrDateType"
class Audit(models.Model):
# Field name made lowercase.
auditkey = models.SmallIntegerField(db_column="auditKey", primary_key=True)
# Field name made lowercase.
colldbkey = models.IntegerField(db_column="CollDBKey")
# Field name made lowercase.
factoidtypekey = models.SmallIntegerField(db_column="factoidTypeKey")
# Field name made lowercase.
dcdcount = models.SmallIntegerField(db_column="DCDCount")
# Field name made lowercase.
mdbcount = models.SmallIntegerField(db_column="MDBcount")
# Field name made lowercase.
personcount = models.SmallIntegerField(db_column="personCount")
# Field name made lowercase.
subcount = models.SmallIntegerField(db_column="subCount")
problem = models.IntegerField()
class Meta:
db_table = "Audit"
class Bibliography(models.Model):
# Field name made lowercase.
bibkey = models.IntegerField(db_column="bibKey", primary_key=True)
# Field name made lowercase.
latinbib = models.TextField(db_column="latinBib", blank=True, null=True)
# Field name made lowercase.
greekbib = models.TextField(db_column="greekBib", blank=True, null=True)
reference = models.TextField(blank=True, null=True)
date = models.SmallIntegerField()
red = models.IntegerField()
# Field name made lowercase.
shortname = models.TextField(db_column="shortName", blank=True, null=True)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return self.shortname
class Meta:
ordering = ["latinbib"]
db_table = "Bibliography"
class Boulloterion(models.Model):
# Field name made lowercase.
boulloterionkey = models.IntegerField(db_column="boulloterionKey", primary_key=True)
title = models.TextField(blank=True, null=True)
text = models.TextField(blank=True, null=True)
# Field name made lowercase.
origltext = models.TextField(db_column="origLText", blank=True, null=True)
# Field name made lowercase.
olangkey = models.TextField(db_column="oLangKey", blank=True, null=True)
# Field name made lowercase.
obvicon = models.TextField(db_column="obvIcon", blank=True, null=True)
# Field name made lowercase.
revicon = models.TextField(db_column="revIcon", blank=True, null=True)
diameter = models.TextField(blank=True, null=True)
# Field name made lowercase.
datewords = models.TextField(db_column="dateWords", blank=True, null=True)
# Field name made lowercase.
obvtypekey = models.IntegerField(db_column="obvTypeKey")
# Field name made lowercase.
revtypekey = models.CharField(db_column="revTypeKey", max_length=100)
# Field name made lowercase.
scdatekey = models.IntegerField(db_column="scDateKey")
# Field name made lowercase.
hasimage = models.IntegerField(db_column="hasImage", blank=True, null=True)
@cached_property
def get_person(self):
persons = Person.objects.filter(
factoidperson__factoidpersontype__fptypename="Primary",
factoidperson__factoid__boulloterion__pk=self.pk,
).distinct()
if persons.count() > 0:
return persons[0]
else:
return None
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return self.title
class Meta:
db_table = "Boulloterion"
ordering = ["title"]
class Boulloterionfigure(models.Model):
# Field name made lowercase.
boulloterionfigurekey = models.IntegerField(db_column="boulloterionFigureKey")
# Field name made lowercase.
boulloterionkey = models.IntegerField(db_column="boulloterionKey")
# Field name made lowercase.
figurekey = models.IntegerField(db_column="figureKey")
class Meta:
db_table = "BoulloterionFigure"
class Chronitem(models.Model):
# Field name made lowercase.
chronitemkey = models.SmallIntegerField(db_column="chronItemKey", primary_key=True)
title = models.CharField(max_length=200, blank=True, null=True)
notes = models.TextField(blank=True, null=True)
parent = models.SmallIntegerField(blank=True, null=True)
# Field name made lowercase.
chronorder = models.SmallI | ntegerField(db_column="chronOrder", b | lank=True, null=True)
lft = models.SmallIntegerField()
rgt = models.SmallIntegerField()
chrontreekey = models.SmallIntegerField(
db_column="chronTreeKey", blank=True, null=True
)
year = models.SmallIntegerField(blank=True, null=True)
datingelement = models.CharField(
db_column="datingElement", max_length=100, blank=True, null=True
) # Field name made lowercase.
class Meta:
db_table = "ChronItem"
class Chronitemfactoid(models.Model):
# Field name made lowercase.
factoidkey = models.SmallIntegerField(db_column="factoidKey", blank=True, null=True)
chronitemkey = models.SmallIntegerField(
db_column="ChronItemKey", blank=True, null=True
)
# Field name made lowercase.
chronorder = models.SmallIntegerField(db_column="chronOrder", blank=True, null=True)
class Meta:
db_table = "ChronItemFactoid"
class Chronsource(models.Model):
# Field name made lowercase.
chronsourcekey = models.SmallIntegerField(
db_column="chronSourceKey", primary_key=True
)
sourceref = models.CharField(
db_column="sourceRef", max_length=100, blank=True, null=True
)
chronitemkey = models.SmallIntegerField(
db_column="chronItemKey", blank=True, null=True
)
# Field name made lowercase.
sourcekey = models.SmallIntegerField(db_column="sourceKey", blank=True, null=True)
# Field name made lowercase.
datetypekey = models.SmallIntegerField(
db_column="dateTypeKey", blank=True, null=True
)
class Meta:
db_table = "ChronSource"
class Chrontree(models.Model):
# Field name made lowercase.
chrontreekey = models.SmallIntegerField(db_column="chronTreeKey", primary_key=True)
title = models.CharField(max_length=200, blank=True, null=True)
notes = models.TextField(blank=True, null=True)
class Meta:
db_table = "ChronTree"
class Colldb(models.Model):
# Field name made lowercase.
colldbkey = models.SmallIntegerField(db_column="collDBKey", primary_key=True)
# Field name made lowercase.
colldbid = models.CharField(db_column="collDBID", max_length=200)
researcher = models.CharField(max_length=50)
corrector = models.CharField(max_length=50)
cdbcreationdate = models.DateTimeField(
db_column="cdbCreationDate", blank=True, null=True
)
# Field name made lowercase.
cdbimportdate = m |
hrayr-artunyan/shuup | shuup/xtheme/views/extra.py | Python | agpl-3.0 | 1,769 | 0.000565 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.http.response import HttpResponseNotFound
from shuup.xtheme._theme | import get_current_theme
_VIEW_CACHE = {}
def clear_vie | w_cache(**kwargs):
_VIEW_CACHE.clear()
setting_changed.connect(clear_view_cache, dispatch_uid="shuup.xtheme.views.extra.clear_view_cache")
def _get_view_by_name(theme, view_name):
view = theme.get_view(view_name)
if hasattr(view, "as_view"): # Handle CBVs
view = view.as_view()
if view and not callable(view):
raise ImproperlyConfigured("View %r not callable" % view)
return view
def get_view_by_name(theme, view_name):
if not theme:
return None
cache_key = (theme.identifier, view_name)
if cache_key not in _VIEW_CACHE:
view = _get_view_by_name(theme, view_name)
_VIEW_CACHE[cache_key] = view
else:
view = _VIEW_CACHE[cache_key]
return view
def extra_view_dispatch(request, view):
"""
Dispatch to an Xtheme extra view.
:param request: A request
:type request: django.http.HttpRequest
:param view: View name
:type view: str
:return: A response of some ilk
:rtype: django.http.HttpResponse
"""
theme = get_current_theme(request)
view_func = get_view_by_name(theme, view)
if not view_func:
msg = "%s/%s: Not found" % (getattr(theme, "identifier", None), view)
return HttpResponseNotFound(msg)
return view_func(request)
|
ActiveState/code | recipes/Python/189745_Symmetric_datobfuscatiusing/recipe-189745.py | Python | mit | 1,786 | 0.00112 | class Obfuscator:
""" A simple obfuscator class using repeated xor """
def __init__(self, data):
self._string = data
def obfuscate(self):
"""Obfuscate a string by using repeated xor"""
out = ""
data = self._string
a0=ord(data[0])
a1=ord(data[1])
e0=chr(a0^a1)
out += e0
x=1
eprev=e0
while x<len(data):
ax=ord(data[x])
ex=chr(ax^ord(eprev))
out += ex
#throw some chaff
chaff = chr(ord(ex)^ax)
out += chaff
eprev = ex
x+=1
return out
def unobfuscate(self):
""" Reverse of obfuscation """
out = ""
data = self._string
x=len(data) - 2
while x>1:
apos=data[x]
aprevpos=data[x-2]
epos=chr(ord(apos)^ord(aprevpos))
out += epos
x -= 2
#reverse string
out2=""
x=len(out)-1
while x>=0:
out2 += out[x]
x -= 1
out=out2
#second character
e2=data[2]
a2=data[1]
a1=chr(ord(a2)^ord(e2))
a1 += out
out = a1
#first character
e1=out[0]
a1=data[0]
a0=chr(ord(a1)^ord(e1))
a0 += out
out = a0
return out
def main():
testString="Py | thon obfuscator"
obfuscator = Obfuscator(testString)
testStringObf = obfuscator.obfuscate()
print testStringObf
obfuscator = Obfuscator(tes | tStringObf)
testString = obfuscator.unobfuscate()
print testString
if __name__=="__main__":
main()
|
oomlout/oomlout-OOMP | old/OOMPpart_HEAD_I01_L_PI08_01.py | Python | cc0-1.0 | 242 | 0 | import OOMP
new | Part = OOMP.oompItem(8936)
newPart.addTag("oompType", "HEAD")
newPart.addTag("oompSize", "I01")
newPart.addTag("oompColor", "L")
newPart.addTag("oompDesc", "PI08" | )
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
|
bowringchan/SDRRadio_Remote_Version | encoder.py | Python | gpl-3.0 | 3,025 | 0.01124 | #coding=utf-8
import subprocess
from time import sleep
import os
import signal
import m3u8_generator
import fifo_tool
class Batch_Encoder:
def __init__(self, fifo_tool_i):
self.fifo_tool_i = fifo_tool_i
self.output_list = []
self.output_counter = 1
self.EXTINF = 1
self.media_sequence = 1
self.thread_running = True
def encode_ffmpeg_fifo(self, encoder_ready):
#subprocess.call("ffmpeg -loop 1 -i image.jpg -f u8 -ar 48000 -channels 1 -i audio/filesink.raw -c:v libx264 -tune stillimage -pix_fmt yuv420p -ac 2 -c:a aac -f hls -hls_time 3 -hls_list_size 5 -hls_segment_filename 'audio%03d.ts' RTLSDR.m3u8",shell = True)
ffmpeg_p = subprocess.Popen('exec '+"ffmpeg -f u8 -ar 48000 -channels 1 -i ../audio/filesink.raw -c:a aac -f hls -hls_flags omit_endlist -hls_time "+str(self.EXTINF)+" -hls_list_size 3 -hls_segment_filename 'audio%03d.ts' RTLSDR.m3u8", shell=True,cwd = os.getcwd()+'/static')
encoder_ready[0] = 1
while True:
if self.thread_running == True:
sleep(1)
else:
break
ffmpeg_p.kill()
print 'ffmpeg exited\n',ffmpeg_p.pid
def encode(self):
# 1s 8bit linear PCM raw audio data take 48KB
#print 'Debug point: encode start reading'
buf = self.fifo_desc.read(48000 * self.EXTINF)
output = open('audio/audio' + str(self.output_counter) + '.raw', 'w')
output.write(buf)
output.close()
# TODO Convert And Append mp3 filename to output_list
# lame -r -s 48 --bitwidth 8 --unsigned --quiet
#ffmpeg -f u8 -ar 48000 -i audio1.raw -f mpegts output.mp3 -v 0
#subprocess.call(
# "lame -r -s 48 --bitwidth 8 --unsigned --quiet -m m audio/audio" + str(self.output_counter) + ".raw"+
# " audio/audio" + str(self.output_counter) + ".mp3",shell = True)
subprocess.call("ffmpeg -f u8 -ar 48000 -channels 1 -i audio/audio"+str(self.output_counter)+".raw -f mpegts -mpegts_copyts 1 -output_ts_offset "+ str((self.output_counter-1) * self.EXTINF)+" -b:a 128k audio/audio"+str(self.output_counter)+".mp3 -v 0",shell = True)
self.output_list.append('audio/audio' + str(self.output_counter) + '.mp3')
#if self.output_list.__len__() > 5:
#self.output_list.pop(0)
#self.media_sequence += 1
# ENTRY
def encode_mkm3u8(self):
print 'Debug point:encode Thread run\n'
self.fifo_desc = self.fifo_tool_i.open_file_for_read()
print 'Debug point:encode fifo read open\n'
while True:
if self.thread_running == True:
self.encode()
m3u8_generator_i = m3u8_generator.M3u8_Generator(self.EXTINF)
m3u8_g | enerator_i.generate(self.output_list, self.media_sequence)
self.output_co | unter += 1
else:
break
self.fifo_tool_i.delfifo_file()
subprocess.call("sudo sh clean.sh",shell = True)
|
yadudoc/cloud_kotta | command.py | Python | apache-2.0 | 6,801 | 0.010587 | #!/usr/bin/env python
import subprocess32 as subprocess
import threading
import os
import time
import dynamo_utils as dutils
import config_manager as cm
import shlex
############################################################################
# Default params
############################################################################
sleep_time = 5
USAGE_UPDATE_TIME = 120
WALLTIME_EXCEEDED = 1001
KILLED_BY_REQUEST = 1002
############################################################################
# Check dynamodb to ensure that the application has not been cancelled
############################################################################
def check_if_cancelled(app, job_id):
    """Return True if the job's DynamoDB record is marked 'cancelled'.

    A falsy job_id short-circuits to False (nothing to look up).  AWS
    credentials are refreshed from the instance metadata server before the
    DynamoDB read.

    Fix: py2-only print statements replaced with the compatible call form;
    the cancelled check is computed once and returned directly.
    """
    if not job_id:
        return False
    print("Statecheck")
    cm.update_creds_from_metadata_server(app)
    record = dutils.dynamodb_get(app.config["dyno.conn"], job_id)
    cancelled = record["status"] == "cancelled"
    print("Cancelled" if cancelled else "Job not cancelled")
    return cancelled
def update_record(record, key, value):
    """Set ``record[key] = value`` and persist the record.

    The record is mutated in place and saved with ``overwrite=True``;
    nothing is returned.
    """
    record[key] = value
    record.save(overwrite=True)
############################################################################
# Update dynamodb with usage stats
############################################################################
def update_usage_stats(app, job_id):
    """Append current system stats to the job's ``usage_stats`` field.

    Runs system_stats.sh, concatenates its output onto the record's existing
    ``usage_stats`` value and persists the record via :func:`update_record`.

    Returns False for a falsy job_id (preserved for existing callers);
    otherwise returns None.  Failure to run the stats script is logged and
    swallowed -- usage stats are best-effort.

    Fixes: py2-only print statements replaced with the compatible call form;
    the useless binding of update_record's None return to ``st`` is dropped.
    """
    if not job_id:
        return False
    print("Updating usage_stats")
    try:
        cmd = ["/home/ubuntu/task_engine/system_stats.sh", "{0}".format(time.time())]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out, err = proc.communicate()
    except Exception as e:
        # Non-critical: log and bail out rather than failing the job.
        print("Failed to run system_stats.sh")
        print("Caught exception : {0}".format(e))
        return
    cm.update_creds_from_metadata_server(app)
    record = dutils.dynamodb_get(app.config["dyno.conn"], job_id)
    current = record.get("usage_stats", "") + out.strip('\n')
    update_record(record, "usage_stats", current)
############################################################################
# Run a command
############################################################################
def execute(app, cmd, walltime, job_id, env_vars=None):
    """Run *cmd* under a walltime limit, logging stdout/stderr to files.

    Short jobs (walltime <= 300s) are exec'd directly (no shell) and bounded
    with the subprocess timeout machinery.  Longer jobs run through a shell
    and are polled so walltime can be enforced, DynamoDB cancellation
    requests honored, and usage stats pushed periodically.

    :param app: application/config object (used for cancellation checks)
    :param cmd: command line to run
    :param walltime: maximum allowed runtime in seconds
    :param job_id: job identifier, exported to the child as TURING_JOB_ID
    :param env_vars: optional dict of extra environment variables.  Defaults
        to None instead of a shared mutable ``{}`` (bug fix).
    :returns: the child's exit code, WALLTIME_EXCEEDED (1001) on timeout,
        KILLED_BY_REQUEST (1002) on cancellation, or -1 on launch failure.
    """
    print("RunCommand Started {0}".format(cmd))
    # NOTE(review): these handles are inherited by the child and never
    # explicitly closed by the parent -- confirm lifetime expectations.
    std_out = open("STDOUT.txt", 'w')
    std_err = open("STDERR.txt", 'w')

    env = os.environ.copy()
    if env_vars:
        env.update(env_vars)
    env["TURING_JOB_ID"] = job_id
    env["HOME"] = "/home/ubuntu"

    start_time = time.time()
    if walltime < 301:
        # Short job: let subprocess enforce the timeout for us.
        try:
            proc = subprocess.Popen(shlex.split(cmd), stdout=std_out,
                                    stderr=std_err, env=env)
            return proc.wait(timeout=walltime)
        except subprocess.TimeoutExpired:
            proc.kill()
            return WALLTIME_EXCEEDED
        except Exception as e:  # fixed py2-only "except Exception, e" syntax
            print("Process exited with exception : {0}".format(e))
            return -1

    # Long job: poll so we can enforce walltime, honor cancellation
    # requests, and push usage stats along the way.
    proc = subprocess.Popen(cmd, stdout=std_out, stderr=std_err, env=env,
                            shell=True)
    t_last_update = 0
    while True:
        delta = int(time.time() - start_time)
        status = proc.poll()
        print(status)
        if status is None:
            print("Process is still active")
        else:
            print("Process exited with code {0}".format(status))
            return status
        if delta > walltime:
            print("Process exceeded walltime limits {0} > {1}".format(delta, walltime))
            proc.kill()
            return WALLTIME_EXCEEDED
        if check_if_cancelled(app, job_id):
            print("Termination request received. killing process")
            proc.kill()
            return KILLED_BY_REQUEST
        # Update once immediately, then whenever USAGE_UPDATE_TIME elapses.
        if t_last_update == 0 or (delta - t_last_update) > USAGE_UPDATE_TIME:
            update_usage_stats(app, job_id)
            t_last_update = delta
        time.sleep(sleep_time)
    # The original trailing elapsed-time computation was unreachable (every
    # path out of the loop returns) and has been removed.
############################################################################
# Run a command
############################################################################
def execute_wait(app, cmd, walltime, job_id):
    """Run *cmd* through the shell and block until it completes.

    :returns: elapsed wall time in seconds on completion (regardless of the
        child's exit code), or -1 if the process could not be launched.

    NOTE(review): walltime and job_id are accepted but unused -- confirm
    whether a timeout / cancellation check was intended here.

    Fixes: py2-only print statements replaced with the compatible call form;
    the unused duplicate ``start_time`` local is dropped.
    """
    start_t = time.time()
    std_out = open("exec_wait.out.txt", 'w')
    std_err = open("exec_wait.err.txt", 'w')
    try:
        proc = subprocess.Popen(cmd, stdout=std_out, stderr=std_err, shell=True)
        proc.wait()
    except Exception as e:
        print("Caught exception : {0}".format(e))
        return -1
    total_t = time.time() - start_t
    print("RunCommand Completed {0}".format(cmd))
    return total_t
def testing():
    """Ad-hoc smoke tests for execute()/execute_wait(); run manually.

    Requires production.conf plus AWS/DynamoDB access, so this is not an
    automated unit test.  Several expected-value checks below look suspect;
    see the NOTE(review) comments.
    """
    import config_manager as cm
    app = cm.load_configs("production.conf")
    # This dict is rebound before it is ever used below.
    cmd = {"job_id" : 123123,
           "executable" : "/bin/echo",
           "args" : "hello"}
    job_id = "ce19ede4-da29-48e5-abcf-2eff53778333"
    update_usage_stats(app, job_id)
    update_usage_stats(app, job_id)
    # Missing binary: expects the shell's command-not-found code (127).
    status = execute(app, "/bin/doo Hello World", 5, None)
    if status == 127 :
        print "Pass"
    else:
        print "Failed test"
    # NOTE(review): '/bin/sleep 0' exits 0, yet 127 is expected here --
    # this check appears wrong; confirm intent.
    status = execute(app, "/bin/sleep 0", 5, None)
    if status == 127 :
        print "Pass"
    else:
        print "Failed test"
    # NOTE(review): a 60s sleep with walltime 1 should produce
    # WALLTIME_EXCEEDED (1001), not 127 -- confirm intent.
    status = execute(app, "/bin/sleep 60", 1, None)
    if status == 127 :
        print "Pass"
    else:
        print "Failed test"
    # NOTE(review): with walltime < 301 execute() uses shlex.split and no
    # shell, so the ';' here is not interpreted by a shell -- confirm.
    status = execute(app, '/bin/echo "Hello World"; sleep 8', 10, None)
    if status == 0 :
        print "Pass"
    else:
        print "Failed test"
    # NOTE(review): execute_wait() expects a command *string*; passing these
    # dicts makes Popen raise, so each call should return -1 -- confirm.
    cmd = {"job_id" : 123123,
           "executable" : "aws",
           "args" : "s3 cp {0} {1}".format("./dummy50m", "s3://klab-jobs/yadu/data/dummy50m") }
    print execute_wait(app, cmd, 50, "asdsada")
    cmd = {"job_id" : 123123,
           "executable" : "aws",
           "args" : "s3 cp {0} {1}".format("./dummy500m", "s3://klab-jobs/yadu/data/dummy500m") }
    print execute_wait(app, cmd, 50, "asdsada")
    cmd = {"job_id" : 123123,
           "executable" : "aws",
           "args" : "s3 cp {0} {1}".format("./dummy1g", "s3://klab-jobs/yadu/data/dummy1g") }
    print execute_wait(app, cmd, 50, "asdsada")
    cmd = {"job_id" : 123123,
           "executable" : "aws",
           "args" : "s3 cp {0} {1}".format("./shuf.txt", "s3://klab-jobs/yadu/data/dummy1g") }
    print execute_wait(app, cmd, 50, "asdsada")
# Script entry point: run the ad-hoc smoke tests.
if __name__ == "__main__":
    testing()
|
MartinHjelmare/home-assistant | homeassistant/components/wemo/switch.py | Python | apache-2.0 | 8,674 | 0 | """Support for WeMo switches."""
import asyncio
import logging
from datetime import datetime, timedelta
import requests
import async_timeout
from homeassistant.components.switch import SwitchDevice
from homeassistant.exceptions import PlatformNotReady
from homeassistant.util import convert
from homeassistant.const import (
STATE_OFF, STATE_ON, STATE_STANDBY, STATE_UNKNOWN)
from . import SUBSCRIPTION_REGISTRY
SCAN_INTERVAL = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
ATTR_SENSOR_STATE = 'sensor_state'
ATTR_SWITCH_MODE = 'switch_mode'
ATTR_CURRENT_STATE_DETAIL = 'state_detail'
ATTR_COFFEMAKER_MODE = 'coffeemaker_mode'
MAKER_SWITCH_MOMENTARY = 'momentary'
MAKER_SWITCH_TOGGLE = 'toggle'
WEMO_ON = 1
WEMO_OFF = 0
WEMO_STANDBY = 8
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create a WeMo switch entity from SSDP discovery information."""
    from pywemo import discovery

    # Nothing to do without discovery data.
    if discovery_info is None:
        return
    location = discovery_info['ssdp_description']
    mac = discovery_info['mac_address']
    try:
        device = discovery.device_from_description(location, mac)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as err:
        _LOGGER.error("Unable to access %s (%s)", location, err)
        raise PlatformNotReady

    if device:
        add_entities([WemoSwitch(device)])
class WemoSwitch(SwitchDevice):
"""Representation of a WeMo switch."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self.insight_params = None
self.maker_params = None
self.coffeemaker_mode = None
self._state = None
self._mode_string = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.info("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(
self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
@property
def unique_id(self):
"""Return the ID of this WeMo switch."""
return self._serialnumber
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the state | attributes of the device."""
attr = {}
if self.maker_params:
# Is the maker sensor on or off.
if self.maker_params['hassensor']:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params['sensorstate']:
attr[ATTR_SENSOR_STATE] = STATE_OFF
else:
attr[ATTR_SENSOR_S | TATE] = STATE_ON
# Is the maker switch configured as toggle(0) or momentary (1).
if self.maker_params['switchmode']:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY
else:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE
if self.insight_params or (self.coffeemaker_mode is not None):
attr[ATTR_CURRENT_STATE_DETAIL] = self.detail_state
if self.insight_params:
attr['on_latest_time'] = \
WemoSwitch.as_uptime(self.insight_params['onfor'])
attr['on_today_time'] = \
WemoSwitch.as_uptime(self.insight_params['ontoday'])
attr['on_total_time'] = \
WemoSwitch.as_uptime(self.insight_params['ontotal'])
attr['power_threshold_w'] = \
convert(
self.insight_params['powerthreshold'], float, 0.0
) / 1000.0
if self.coffeemaker_mode is not None:
attr[ATTR_COFFEMAKER_MODE] = self.coffeemaker_mode
return attr
@staticmethod
def as_uptime(_seconds):
"""Format seconds into uptime string in the format: 00d 00h 00m 00s."""
uptime = datetime(1, 1, 1) + timedelta(seconds=_seconds)
return "{:0>2d}d {:0>2d}h {:0>2d}m {:0>2d}s".format(
uptime.day-1, uptime.hour, uptime.minute, uptime.second)
@property
def current_power_w(self):
"""Return the current power usage in W."""
if self.insight_params:
return convert(
self.insight_params['currentpower'], float, 0.0
) / 1000.0
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
if self.insight_params:
miliwatts = convert(self.insight_params['todaymw'], float, 0.0)
return round(miliwatts / (1000.0 * 1000.0 * 60), 2)
@property
def detail_state(self):
"""Return the state of the device."""
if self.coffeemaker_mode is not None:
return self._mode_string
if self.insight_params:
standby_state = int(self.insight_params['state'])
if standby_state == WEMO_ON:
return STATE_ON
if standby_state == WEMO_OFF:
return STATE_OFF
if standby_state == WEMO_STANDBY:
return STATE_STANDBY
return STATE_UNKNOWN
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""Return true if switch is available."""
return self._available
@property
def icon(self):
"""Return the icon of device based on its type."""
if self._model_name == 'CoffeeMaker':
return 'mdi:coffee'
return None
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.wemo.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.wemo.off()
async def async_added_to_hass(self):
"""Wemo switch added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
    async def async_update(self):
        """Update WeMo state.

        Wemo has an aggressive retry logic that sometimes can take over a
        minute to return. If we don't get a state after 5 seconds, assume the
        Wemo switch is unreachable. If update goes through, it will be made
        available again.
        """
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return

        try:
            with async_timeout.timeout(5):
                # shield() keeps the underlying update running even if this
                # coroutine is cancelled by the timeout, so a slow-but-alive
                # device can still finish its refresh in the background.
                await asyncio.shield(self._async_locked_update(True))
        except asyncio.TimeoutError:
            _LOGGER.warning('Lost connection to %s', self.name)
            self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_job(self._update, force_update)
def _update(self, force_update):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
if self._model_name == 'Insight':
self.insight_params = self.wemo.insight_params
self.insight_params['standby_state'] = (
self.wemo.get_standby_state)
elif self._model_name == 'Maker':
self.maker_params = self.wemo.maker_ |
mathemage/h2o-3 | h2o-py/h2o/grid/metrics.py | Python | apache-2.0 | 34,611 | 0.007657 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
#-----------------------------------------------------------------------------------------------------------------------
# AutoEncoder Grid Search
#-----------------------------------------------------------------------------------------------------------------------
class H2OAutoEncoderGridSearch(object):
    """Grid-search mixin exposing autoencoder-specific metrics."""

    def anomaly(self, test_data, per_feature=False):
        """Reconstruction error of every model in the grid on *test_data*.

        :param H2OFrame test_data: dataset on which the error is computed.
        :param bool per_feature: if True, return per-feature squared error
            instead of the mean squared error.
        :returns: dict mapping model_id -> reconstruction error.
        """
        errors = {}
        for mdl in self.models:
            errors[mdl.model_id] = mdl.anomaly(test_data, per_feature)
        return errors
#-----------------------------------------------------------------------------------------------------------------------
# Binomial Grid Search
#-----------------------------------------------------------------------------------------------------------------------
class H2OBinomialGridSearch(object):
def F1(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F1 values for a set of thresholds for the models explored.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where
the keys are "train", "valid", and "xval".
:param List thresholds: If None, then the thresholds in this set of metrics will be used.
:param bool train: If True, return the F1 value for the training data.
:param bool valid: If True, return the F1 value for the validation data.
:param bool xval: If True, return the F1 value for each of the cross-validated splits.
:returns: Dictionary of model keys to F1 values
"""
return {model.model_id: model.F1(thresholds, train, valid, xval) for model in
self.models} # dict model key -> F1 score
def F2(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F2 for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics will be used.
:param bool train: If train is True, then return the F2 value for the training data.
:param bool valid: If valid is True, then return the F2 value for the validation data.
:param bool xval: If xval is True, then return the F2 value for the cross validation data.
:returns: Dictionary of model keys to F2 values.
"""
return {model.model_id: model.F2(thresholds, train, valid, xval) for model in self.models}
def F0point5(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F0.5 for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics will be used.
:param bool train: If train is True, then return the F0point5 value for the training data.
:param bool valid: If valid is True, then return the F0point5 value for the validation data.
:param bool xval: If xval is True, then return the F0point5 value for the cross validation data.
:returns: The F0point5 for this binomial model.
"""
return {model.model_id: model.F0point5(thresholds, train, valid, xval) for model in self.models}
def accuracy(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the accuracy for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics | will be used.
:param bool train: If train is True, then return the accuracy value for the training data.
:param bool valid: If valid is True, then return the accu | racy value for the validation data.
:param bool xval: If xval is True, then return the accuracy value for the cross validation data.
:returns: The accuracy for this binomial model.
"""
return {model.model_id: model.accuracy(thresholds, train, valid, xval) for model in self.models}
def error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the error for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics will be used.
:param bool train: If train is True, then return the error value for the training data.
:param bool valid: If valid is True, then return the error value for the validation data.
:param bool xval: If xval is True, then return the error value for the cross validation data.
:returns: The error for this binomial model.
"""
return {model.model_id: model.error(thresholds, train, valid, xval) for model in self.models}
def precision(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the precision for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics will be used.
:param bool train: If train is True, then return the precision value for the training data.
:param bool valid: If valid is True, then return the precision value for the validation data.
:param bool xval: If xval is True, then return the precision value for the cross validation data.
:returns: The precision for this binomial model.
"""
return {model.model_id: model.precision(thresholds, train, valid, xval) for model in self.models}
def tpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Positive Rate for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds
in this set of metrics will be used.
:param bool train: If train is True, then return the TPR value for the training data.
:param bool valid: If valid is True, then return the TPR value for the validation data.
:param bool xval: If xval is True, then return the TPR value for the cross validation data.
:returns: The TPR for this binomial model.
"""
return {model.model_id: model.tpr(thresholds, train, valid, xval) for model in self.models}
|
sh1ng/imba | lgbm_submition.py | Python | agpl-3.0 | 14,927 | 0.005962 | import gc
import pandas as pd
import numpy as np
import os
import arboretum
import lightgbm as lgb
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
def fscore(true_value_matrix, prediction, order_index, product_index, rows, cols, threshold=(0.5,)):
    """Sample-averaged F1 of thresholded predictions vs the truth matrix.

    Builds a sparse (rows x cols) COO matrix from the flat prediction scores,
    then for each cutoff in *threshold* binarizes it and scores against
    *true_value_matrix* via ``f1_score(average='samples')``.

    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall; any iterable of cutoffs is accepted,
    so existing list-passing callers are unaffected.

    :returns: list of F1 values, one per threshold.
    """
    prediction_value_matrix = coo_matrix(
        (prediction, (order_index, product_index)),
        shape=(rows, cols), dtype=np.float32)
    return [
        f1_score(true_value_matrix, prediction_value_matrix > cutoff,
                 average='samples')
        for cutoff in threshold
    ]
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint1 | 6,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
| 'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_test = order_train.loc[order_train.eval_set == "test", ['order_id', 'product_id']]
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
print(order_train.columns)
###########################
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'}) \
.rename(columns={'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationsamlidppolicy_binding.py | Python | apache-2.0 | 4,203 | 0.031882 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationsamlidppolicy_binding(base_resource):
    """Binding class showing the resources that can be bound to
    authenticationsamlidppolicy_binding.

    Fixes over the generated SDK code: the ``get`` classmethod's list branch
    indexed into an uninitialized ``response`` variable (NameError at
    runtime), and the py2-only ``ur'''...'''`` docstring prefix is replaced
    with ``r'''...'''`` so the module also parses under Python 3.
    """
    def __init__(self) :
        self._name = ""
        # NOTE(review): the generated code assigns the public attribute names
        # here while the properties below read the underscored forms;
        # preserved as-is since base_resource may mediate attribute access --
        # confirm before changing.
        self.authenticationsamlidppolicy_vpnvserver_binding = []
        self.authenticationsamlidppolicy_authenticationvserver_binding = []

    @property
    def name(self) :
        r"""Name of the SAML IdentityProvider (IdP) policy for which to display detailed information.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        r"""Name of the SAML IdentityProvider (IdP) policy for which to display detailed information.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def authenticationsamlidppolicy_authenticationvserver_bindings(self) :
        r"""authenticationvserver that can be bound to authenticationsamlidppolicy.
        """
        try :
            return self._authenticationsamlidppolicy_authenticationvserver_binding
        except Exception as e:
            raise e

    @property
    def authenticationsamlidppolicy_vpnvserver_bindings(self) :
        r"""vpnvserver that can be bound to authenticationsamlidppolicy.
        """
        try :
            return self._authenticationsamlidppolicy_vpnvserver_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        r"""Converts a nitro response into an object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(authenticationsamlidppolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error 444 means the session is stale; drop it before raising.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.authenticationsamlidppolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        r"""Returns the value of the object identifier argument (name).
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(self, service, name) :
        r"""Use this API to fetch authenticationsamlidppolicy_binding resource(s).

        Accepts a single name or a list of names; returns a single resource or
        a list of resources respectively.  Bug fix: the result list is now
        pre-allocated before the indexed assignments in the list branch.
        """
        try :
            if type(name) is not list :
                obj = authenticationsamlidppolicy_binding()
                obj.name = name
                response = obj.get_resource(service)
            else :
                response = []
                if name and len(name) > 0 :
                    response = [None for _ in range(len(name))]
                    obj = [authenticationsamlidppolicy_binding() for _ in range(len(name))]
                    for i in range(len(name)) :
                        obj[i].name = name[i]
                        response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e
class authenticationsamlidppolicy_binding_response(base_response) :
    """NITRO response envelope carrying authenticationsamlidppolicy_binding
    resources plus the standard error/session fields.

    Fix: the generated code first bound an empty list to the binding
    attribute and immediately overwrote it; the redundant assignment is
    dropped.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one resource object per expected record.
        self.authenticationsamlidppolicy_binding = [authenticationsamlidppolicy_binding() for _ in range(length)]
|
jasonwee/asus-rt-n14uhp-mrtg | src/lesson_algorithms/itertools_zip.py | Python | apache-2.0 | 55 | 0 | for i in zip | ([1, 2, 3], [ | 'a', 'b', 'c']):
print(i)
|
dreibh/planetlab-lxc-plcapi | PLC/Methods/AddSliceToNodesWhitelist.py | Python | bsd-3-clause | 1,708 | 0.002927 | from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Nodes import Node, Nodes
from PLC.Slices import Slice, Slices
from PLC.Auth import Auth
class AddSliceToNodesWhitelist(Method):
    """
    Adds the specified slice to the whitelist on the specified nodes. Nodes may be
    either local or foreign nodes.

    If the slice is already associated with a node, no errors are
    returned.

    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin']

    accepts = [
        Auth(),
        Mixed(Slice.fields['slice_id'],
              Slice.fields['name']),
        [Mixed(Node.fields['node_id'],
               Node.fields['hostname'])]
        ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, slice_id_or_name, node_id_or_hostname_list):
        # Resolve the slice; it must exist and be locally owned.
        # (Local name avoids shadowing the builtin `slice`.)
        matches = Slices(self.api, [slice_id_or_name])
        if not matches:
            raise PLCInvalidArgument("No such slice")
        slice_obj = matches[0]
        if slice_obj['peer_id'] is not None:
            raise PLCInvalidArgument("Not a local slice")

        # Whitelist the slice on every requested node; only local nodes
        # are eligible.
        nodes = Nodes(self.api, node_id_or_hostname_list)
        for node in nodes:
            if node['peer_id'] is not None:
                raise PLCInvalidArgument("%s not a local node" % node['hostname'])
            if slice_obj['slice_id'] not in node['slice_ids_whitelist']:
                slice_obj.add_to_node_whitelist(node, commit=False)

        slice_obj.sync()
        self.event_objects = {'Node': [node['node_id'] for node in nodes],
                              'Slice': [slice_obj['slice_id']]}
        return 1
|
MontrealCorpusTools/Montreal-Forced-Aligner | montreal_forced_aligner/acoustic_modeling/sat.py | Python | mit | 14,192 | 0.001973 | """Class definitions for Speaker Adapted Triphone trainer"""
from __future__ import annotations
import multiprocessing as mp
import os
import re
import shutil
import subprocess
import time
from queue import Empty
from typing import Dict, List, NamedTuple
import tqdm
from montreal_forced_aligner.acoustic_modeling.triphone import TriphoneTrainer
from montreal_forced_aligner.exceptions import KaldiProcessingError
from montreal_forced_aligner.utils import (
KaldiFunction,
KaldiProcessWorker,
Stopped,
log_kaldi_errors,
parse_logs,
thirdparty_binary,
)
__all__ = ["SatTrainer", "AccStatsTwoFeatsFunction", "AccStatsTwoFeatsArguments"]
class AccStatsTwoFeatsArguments(NamedTuple):
    """Arguments for :func:`~montreal_forced_aligner.acoustic_modeling.sat.AccStatsTwoFeatsFunction`"""

    log_path: str                       # per-job log file
    dictionaries: List[str]             # dictionary names handled by this job
    ali_paths: Dict[str, str]           # dictionary name -> alignment archive
    acc_paths: Dict[str, str]           # dictionary name -> output accumulator
    model_path: str                     # acoustic model used for accumulation
    feature_strings: Dict[str, str]     # speaker-adapted feature specifiers
    si_feature_strings: Dict[str, str]  # speaker-independent feature specifiers
class AccStatsTwoFeatsFunction(KaldiFunction):
"""
Multiprocessing function for accumulating stats across speaker-independent and
speaker-adapted features
See Also
--------
:meth:`.SatTrainer.create_align_model`
Main function that calls this function in parallel
:meth:`.SatTrainer.acc_stats_two_feats_arguments`
Job method for generating arguments for this function
:kaldi_src:`ali-to-post`
Relevant Kaldi binary
:kaldi_src:`gmm-acc-stats-twofeats`
Relevant Kaldi binary
Parameters
----------
args: :class:`~montreal_forced_aligner.acoustic_modeling.sat.AccStatsTwoFeatsArguments`
Arguments for the function
"""
progress_pattern = re.compile(r"^LOG \(gmm-acc-stats-twofeats.* Average like for this file.*")
done_pattern = re.compile(
r"^LOG \(gmm-acc-stats-twofeats.*Done (?P<utterances>\d+) files, (?P<no_posteriors>\d+) with no posteriors, (?P<no_second_features>\d+) with no second features, (?P<errors>\d+) with other errors.$"
)
def __init__(self, args: AccStatsTwoFeatsArguments):
self.log_path = args.log_path
self.dictionaries = args.dictionaries
self.ali_paths = args.ali_paths
self.acc_paths = args.acc_paths
self.model_path = args.model_path
self.feature_strings = args.feature_strings
self.si_feature_strings = args.si_feature_strings
def run(self):
"""Run the function"""
with open(self.log_path, "w", encoding="utf8") as log_file:
for dict_name in self.dictionaries:
ali_path = self.ali_paths[dict_name]
acc_path = self.acc_paths[dict_name]
feature_string = self.feature_strings[dict_name]
si_feature_string = self.si_feature_strings[dict_name]
ali_to_post_proc = subprocess.Popen(
[thirdparty_binary("ali-to-post"), f"ark:{ali_path}", "ark:-"],
stderr=log_file,
stdout=subprocess.PIPE,
env=os.environ,
)
acc_proc = subprocess.Popen(
[
thirdparty_binary("gmm-acc-stats-twofeats"),
self.model_path,
feature_string,
si_feature_string,
"ark,s,cs:-",
acc_path,
],
stderr=subprocess.PIPE,
encoding="utf8",
stdin=ali_to_post_proc.stdout,
env=os.environ,
)
for line in acc_proc.stderr:
log_file.write(line)
m = self.progress_pattern.match(line.strip())
if m:
yield 1, 0, 0, 0
else:
m = self.done_pattern.match(line.strip())
if m:
yield int(m.group("utterances")), int(m.group("no_posteriors")), int(
m.group("no_second_features")
), int(m.group("errors"))
class SatTrainer(TriphoneTrainer):
"""
Speaker adapted trainer (SAT), inherits from TriphoneTrainer
Parameters
----------
subset : int
Number of utterances to use, defaults to 10000
num_leaves : int
Number of states in the decision tree, defaults to 2500
max_gaussians : int
Number of gaussians in the decision tree, defaults to 15000
power : float
Exponent for number of gaussians according to occurrence counts, defaults to 0.2
See Also
--------
:class:`~montreal_forced_aligner.acoustic_modeling.triphone.TriphoneTrainer`
For acoustic model training parsing parameters
Attributes
----------
fmllr_iterations : list
List of iterations to perform fMLLR calculation
"""
    def __init__(
        self,
        subset: int = 10000,
        num_leaves: int = 2500,
        max_gaussians: int = 15000,
        power: float = 0.2,
        **kwargs,
    ):
        """See the class docstring for parameter meanings; extra kwargs go to TriphoneTrainer."""
        super().__init__(**kwargs)
        self.subset = subset
        self.num_leaves = num_leaves
        self.max_gaussians = max_gaussians
        self.power = power
        # Populated by compute_calculated_properties().
        self.fmllr_iterations = []
def acc_stats_two_feats_arguments(self) -> List[AccStatsTwoFeatsArguments]:
"""
Generate Job arguments for :func:`~montreal_forced_aligner.acoustic_modeling.sat.AccStatsTwoFeatsFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.acoustic_modeling.sat.AccStatsTwoFeatsArguments`]
Arguments for processing
"""
feat_strings = self.worker.construct_feature_proc_strings()
si_feat_strings = self.worker.construct_feature_proc_string | s(speaker_independent=True)
return [
AccStatsTwoFeatsArguments(
os.path.join(self.working_log_directory, f"acc_stats_two_feats.{j.name}.log"),
j.current_dictionary_names,
j.construct_path_dictionary(self.working_directory, "ali", "ark"),
j.construct_path_dictionary(self.working_directory, "two_feat_acc", "ark"),
self.model_path,
feat_strings[j.name],
si | _feat_strings[j.name],
)
for j in self.jobs
]
    def calc_fmllr(self) -> None:
        """Delegate fMLLR transform calculation to the underlying worker."""
        self.worker.calc_fmllr()
def compute_calculated_properties(self) -> None:
"""Generate realignment iterations, initial gaussians, and fMLLR iterations based on configuration"""
super().compute_calculated_properties()
self.fmllr_iterations = []
max_fmllr_iter = int(self.num_iterations / 2) - 1
for i in range(1, max_fmllr_iter):
if i < max_fmllr_iter / 2 and i % 2 == 0:
self.fmllr_iterations.append(i)
self.fmllr_iterations.append(max_fmllr_iter)
def _trainer_initialization(self) -> None:
"""Speaker adapted training initialization"""
self.speaker_independent = False
if os.path.exists(os.path.join(self.working_directory, "1.mdl")):
return
if os.path.exists(os.path.join(self.previous_aligner.working_directory, "lda.mat")):
shutil.copyfile(
os.path.join(self.previous_aligner.working_directory, "lda.mat"),
os.path.join(self.working_directory, "lda.mat"),
)
self.tree_stats()
self._setup_tree()
self.compile_train_graphs()
self.convert_alignments()
os.rename(self.model_path, self.next_model_path)
self.iteration = 1
if os.path.exists(os.path.join(self.previous_aligner.working_directory, "trans.0.ark")):
for j in self.jobs:
for path in j.construct_path_dictionary(
self.previous_aligner.working_directory, "trans", "ark"
).values():
shutil.copy(
path,
path.replace(
|
apache/incubator-systemml | src/main/python/systemds/operator/algorithm/builtin/garch.py | Python | apache-2.0 | 3,230 | 0.002477 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/garch.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def garch(X: Matrix,
          kmax: int,
          momentum: float,
          start_stepsize: float,
          end_stepsize: float,
          start_vicinity: float,
          end_vicinity: float,
          sim_seed: int,
          verbose: bool):
    """
    :param X: The input Matrix to apply Arima on.
    :param kmax: Number of iterations
    :param momentum: Momentum for momentum-gradient descent (set to 0 to deactivate)
    :param start_stepsize: Initial gradient-descent stepsize
    :param end_stepsize: gradient-descent stepsize at end (linear descent)
    :param start_vicinity: proportion of randomness of restart-location for gradient descent at beginning
    :param end_vicinity: same at end (linear decay)
    :param sim_seed: seed for simulation of process on fitted coefficients
    :param verbose: verbosity, comments during fitting
    :return: 'OperationNode' containing simulated garch(1,1) process on fitted coefficients & variances of simulated fitted process & constant term of fitted process & 1-st arch-coefficient of fitted process & 1-st garch-coefficient of fitted process & drawbacks: slow convergence of optimization (sort of simulated annealing/gradient descent)
    """
    # Keyword inputs forwarded verbatim to the DML builtin.
    params_dict = {'X': X, 'kmax': kmax, 'momentum': momentum, 'start_stepsize': start_stepsize, 'end_stepsize': end_stepsize, 'start_vicinity': start_vicinity, 'end_vicinity': end_vicinity, 'sim_seed': sim_seed, 'verbose': verbose}
    vX_0 = Matrix(X.sds_context, '')
    vX_1 = Matrix(X.sds_context, '')
    vX_2 = Scalar(X.sds_context, '')
    vX_3 = Scalar(X.sds_context, '')
    vX_4 = Scalar(X.sds_context, '')
    output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4]
    op = MultiReturn(X.sds_context, 'garch', output_nodes, named_input_nodes=params_dict)
    # Wire every output node back to the multi-return op producing it.
    for node in output_nodes:
        node._unnamed_input_nodes = [op]
    return op
|
sam-roth/Keypad | keypad/plugins/pymodel/syntax.py | Python | gpl-3.0 | 5,339 | 0.0118 |
import re
import keyword
import logging
import builtins
from keypad.api import BufferController, autoconnect
from keypad.core.syntaxlib import SyntaxHighlighter, lazy
_python_kwlist = frozenset(keyword.kwlist) - frozenset('from import None False True'.split())
_python_builtins = frozenset(x for x in dir(builtins) if not isinstance(getattr(builtins, x), type))
_python_types = frozenset(x for x in dir(builtins) if isinstance(getattr(builtins, x), type))
@lazy
def pylexer():
    """Construct the lexer region tree used to highlight Python source."""
    from keypad.core.syntaxlib import keyword, regex, region

    Keyword = keyword(_python_kwlist, dict(lexcat='keyword'))
    Import = keyword('from import'.split(), dict(lexcat='keyword.modulesystem'))
    Const = keyword(_python_builtins, dict(lexcat='identifier.constant'))
    Type = keyword(_python_types, dict(lexcat='identifier.type'))

    # Attribute dictionaries shared by the lexers below.
    ESCAPE = dict(lexcat='literal.string.escape')
    STRING = dict(lexcat='literal.string')
    COMMENT = dict(lexcat='comment')
    FUNCTION = dict(lexcat='identifier.function')
    TODO = dict(lexcat='todo')
    SIGIL = dict(lexcat='punctuation.sigil')
    NUMBER = dict(lexcat='literal.numeric')

    Todo = regex(r'\btodo:|\bfixme:|\bhack:', TODO, flags=re.IGNORECASE)
    Comment = region(guard=regex('#'),
                     exit=regex('$'),
                     contains=[Todo],
                     attrs=COMMENT)

    HEX = r'[a-fA-F0-9]'

    # String escape forms: simple, octal, hex, unicode, named, line continuation.
    Esc1 = regex(r'''\\[abfnrtv'"\\]''', ESCAPE)
    Esc2 = regex(r'''\\\[0-7]{1,3}''', ESCAPE)
    Esc3 = regex(r'''\\x[a-fA-F0-9]{2}''', ESCAPE)
    Esc4 = regex(r'\\u' + HEX + r'{4}|\\U' + HEX + '{8}', ESCAPE)
    Esc5 = regex(r'\\N\{[a-zA-Z]+(?:\s[a-zA-Z]+)*}', ESCAPE)
    Esc6 = regex(r'\\$', ESCAPE)

    # Doctest prompts inside triple-quoted strings; contents filled in below
    # once PythonLexers exists (mutual recursion).
    DQDoctest = region(
        guard=regex(r'^\s*>>>\s'),
        exit=regex(r'$|(?=""")'),
        contains=(),
        attrs=ESCAPE
    )
    SQDoctest = region(
        guard=regex(r'^\s*>>>\s'),
        exit=regex(r"$|(?=''')"),
        contains=(),
        attrs=ESCAPE
    )

    Escs = [Esc1, Esc2, Esc3, Esc4, Esc5, Esc6]

    DQString = region(
        guard=regex(r'"(?!"")'),
        exit=regex(r'"'),
        contains=Escs,
        attrs=STRING
    )
    SQString = region(
        guard=regex(r"'(?!'')"),
        exit=regex(r"'"),
        contains=Escs,
        attrs=STRING
    )
    TDQString = region(
        guard=regex(r'"""'),
        exit=regex(r'"""'),
        contains=Escs + [DQDoctest],
        attrs=STRING
    )
    TSQString = region(
        guard=regex(r"'''"),
        exit=regex(r"'''"),
        contains=Escs + [SQDoctest],
        attrs=STRING
    )

    def make_raw_string(quote):
        # A raw string still does not end at a backslash-escaped quote.
        return region(
            guard=regex(r"r" + quote),
            exit=regex(r"\\\\" + quote + "|" + r"(?<!\\)" + quote),
            contains=[regex(r"(?<!\\)\\" + quote, ESCAPE)],
            attrs=STRING
        )

    RSQString = make_raw_string("'")
    RDQString = make_raw_string('"')
    RTSQString = make_raw_string("'''")
    RTDQString = make_raw_string('"""')

    FloatLiteral = regex(r'\b\d*\.\d+', NUMBER)
    IntLiteral = regex(r'\b\d+L?', NUMBER)
    HexLiteral = regex(r'\b0x' + HEX + r'+L?', NUMBER)
    OctLiteral = regex(r'\b0o[0-7]+L?', NUMBER)
    BinLiteral = regex(r'\b0b[01]+L?', NUMBER)

    FuncDef = regex(r'(?:(?<=\bdef)|(?<=\bclass)|(?<=@))\s+\w+', FUNCTION)
    Deco = regex(r'(?<=@)\s*[\w.]+', FUNCTION)
    CommAt = regex(re.escape('@'), SIGIL)

    PythonLexers = [
        Keyword,
        Const,
        Import,
        DQString,
        SQString,
        TDQString,
        TSQString,
        RSQString,
        RDQString,
        IntLiteral,
        HexLiteral,
        OctLiteral,
        BinLiteral,
        FloatLiteral,
        Comment,
        FuncDef,
        CommAt,
        RTSQString,
        RTDQString,
        Deco,
        Type
    ]

    # Close the mutual recursion: doctest bodies are lexed as Python.
    DQDoctest.contains = tuple(PythonLexers)
    SQDoctest.contains = tuple(PythonLexers)

    Python = region(
        guard=None,
        exit=None,
        contains=PythonLexers
    )

    return Python
@autoconnect(BufferController.buffer_needs_highlight,
             lambda tags: tags.get('syntax') == 'python')
def python_syntax_highlighting(controller):
    """Highlight the controller's buffer with the Python lexer when its
    syntax tag is 'python'."""
    highlighter = SyntaxHighlighter('keypad.plugins.pycomplete.syntax', pylexer(), dict(lexcat=None))
    highlighter.highlight_buffer(controller.buffer)
def main():
    """Ad-hoc manual check: highlight a tiny buffer and print the result."""
    # Tokenizer/AttributedString were imported but never used; dropped.
    from keypad.buffers import Buffer

    buf = Buffer()
    buf.insert((0,0), "'\\b")

    highlighter = SyntaxHighlighter('h', pylexer(), dict(lexcat=None))
    highlighter.highlight_buffer(buf)
    print(buf.lines[0])
if __name__ == '__main__':
main()
|
nachandr/cfme_tests | cfme/tests/cloud_infra_common/test_vm_ownership.py | Python | gpl-2.0 | 10,161 | 0.001279 | import fauxfactory
import pytest
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.cloud.provider import CloudProvider
from cfme.exceptions import ItemNotFound
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
pytestmark = [
test_requirements.ownership,
pytest.mark.tier(3),
pytest.mark.provider(
[CloudProvider, InfraProvider],
scope='module',
required_fields=[['templates', 'small_template']] # default for create_on_provider
),
pytest.mark.usefixtures('setup_provider_modscope'),
]
@pytest.fixture(scope="module")
def vm_crud(provider):
collection = provider.appliance.provider_based_collection(provider)
vm = collection.instantiate(random_vm_name(context='ownrs'), provider)
try:
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
except KeyError:
msg = f'Missing template for provider {provider.key}'
logger.exception(msg)
pytest.skip(msg)
yield vm
try:
vm.cleanup_on_provider()
except Exception:
logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
@pytest.fixture(scope="module")
def role_only_user_owned(appliance):
appliance.server.login_admin()
role = appliance.collections.roles.create(
name=fauxfactory.gen_alphanumeric(25, start="role_only_user_owned_"),
vm_restriction='Only User Owned'
)
yield role
appliance.server.login_admin()
role.delete()
@pytest.fixture(scope="module")
def group_only_user_owned(appliance, role_only_user_owned):
group_collection = appliance.collections.groups
group = group_collection.create(
description=fauxfacto | ry.gen_alphanu | meric(25, start="group_only_user_owned_"),
role=role_only_user_owned.name)
yield group
appliance.server.login_admin()
group.delete()
@pytest.fixture(scope="module")
def role_user_or_group_owned(appliance):
appliance.server.login_admin()
role = appliance.collections.roles.create(
name=fauxfactory.gen_alphanumeric(30, start="role_user_or_group_owned_"),
vm_restriction='Only User or Group Owned'
)
yield role
appliance.server.login_admin()
role.delete()
@pytest.fixture(scope="module")
def group_user_or_group_owned(appliance, role_user_or_group_owned):
group_collection = appliance.collections.groups
group = group_collection.create(
description=fauxfactory.gen_alphanumeric(30, start="group_user_or_group_owned_"),
role=role_user_or_group_owned.name)
yield group
appliance.server.login_admin()
group.delete()
def new_credential():
    """Build a Credential with a random principal and a fixed password."""
    # BZ1487199 - CFME allows usernames with uppercase chars which blocks logins
    if BZ.bugzilla.get_bug(1487199).is_opened:
        return Credential(
            principal=fauxfactory.gen_alphanumeric(start="uid").lower(),
            secret='redhat'
        )
    else:
        return Credential(principal=fauxfactory.gen_alphanumeric(start="uid"), secret='redhat')
@pytest.fixture(scope="module")
def user1(appliance, group_only_user_owned):
user1 = new_user(appliance, group_only_user_owned)
yield user1
appliance.server.login_admin()
user1.delete()
@pytest.fixture(scope="module")
def user2(appliance, group_only_user_owned):
user2 = new_user(appliance, group_only_user_owned)
yield user2
appliance.server.login_admin()
user2.delete()
@pytest.fixture(scope="module")
def user3(appliance, group_user_or_group_owned):
user3 = new_user(appliance, group_user_or_group_owned)
yield user3
appliance.server.login_admin()
user3.delete()
def new_user(appliance, group_only_user_owned):
    """Create and return a user in the given group (caller owns teardown)."""
    user = appliance.collections.users.create(
        name=fauxfactory.gen_alphanumeric(start="user_"),
        credential=new_credential(),
        email=fauxfactory.gen_email(),
        groups=[group_only_user_owned],
        cost_center='Workload',
        value_assign='Database'
    )
    return user
def check_vm_exists(vm_ownership):
    """ Checks if VM exists through All Instances tab.

    Args:
        vm_ownership: VM object for ownership test

    Returns:
        :py:class:`bool`
    """
    try:
        # find_quadicon raises ItemNotFound when the VM is not displayed.
        vm_ownership.find_quadicon(from_any_provider=True)
        return True
    except ItemNotFound:
        return False
def test_form_button_validation(user1, vm_crud):
"""Tests group ownership
Metadata:
test_flag: rbac
Polarion:
assignee: spusater
caseimportance: medium
casecomponent: Appliance
initialEstimate: 1/4h
"""
# Reset button test
vm_crud.set_ownership(user=user1, reset=True)
# Cancel button test
vm_crud.set_ownership(user=user1, cancel=True)
# Save button test
vm_crud.set_ownership(user=user1)
# Unset the ownership
vm_crud.unset_ownership()
def test_user_ownership_crud(user1, vm_crud):
"""Tests user ownership
Metadata:
test_flag: rbac
Polarion:
assignee: spusater
caseimportance: medium
casecomponent: Appliance
initialEstimate: 1/4h
"""
# Set the ownership and checking it
vm_crud.set_ownership(user=user1)
with user1:
assert vm_crud.exists, "vm not found"
vm_crud.unset_ownership()
with user1:
assert not check_vm_exists(vm_crud), "vm exists! but shouldn't exist"
def test_group_ownership_on_user_only_role(user2, vm_crud):
"""Tests group ownership
Metadata:
test_flag: rbac
Polarion:
assignee: spusater
caseimportance: medium
casecomponent: Appliance
initialEstimate: 1/4h
"""
# user is only a member of a single group so it will always be the current group
vm_crud.set_ownership(group=user2.groups[0])
with user2:
assert not check_vm_exists(vm_crud), "vm exists! but shouldn't exist"
vm_crud.set_ownership(user=user2)
with user2:
assert vm_crud.exists, "vm exists"
def test_group_ownership_on_user_or_group_role(user3, vm_crud):
"""Tests group ownership
Metadata:
test_flag: rbac
Polarion:
assignee: spusater
caseimportance: medium
casecomponent: Appliance
initialEstimate: 1/4h
"""
# user is only a member of a single group so it will always be the current group
vm_crud.set_ownership(group=user3.groups[0])
with user3:
assert vm_crud.exists, "vm not found"
vm_crud.unset_ownership()
with user3:
assert not check_vm_exists(vm_crud), "vm exists! but shouldn't exist"
@pytest.mark.provider([VMwareProvider], scope="module")
@pytest.mark.meta(blockers=[BZ(1622952, forced_streams=['5.10'])])
def test_template_set_ownership(appliance, vm_crud):
""" Sets ownership to an infra template.
First publishes a template from a VM, then tries to unset an ownership of that template,
then sets it back and in the end removes the template.
VM is removed via fixture.
Tests BZ 1446801 in RHCF3-14353
Polarion:
assignee: spusater
casecomponent: Infra
caseimportance: medium
initialEstimate: 1/6h
"""
# setup the test
# publish a vm to a template
template = vm_crud.publish_to_template(template_name=random_vm_name(context='ownrs'))
# instantiate a user representing no owner
user_no_owner = appliance.collections.users.instantiate(name="<No Owner>")
# instantiate a user representing Administrator
user_admin = appliance.collections.users.instantiate(name="Administrator")
# run the test
try:
# unset ownership
template.set_ownership(user=user_no_owner)
# set ownership back to admin
template.set_ownership(user=user_admin)
finally:
# in every case, delete template we created
template.mgmt.delete()
@pytest.mark.manual
@test_requirements.ownership
@pytest.mark.tier(1)
def test_set_ownership_back_to_default( |
jiaphuan/models | research/tcn/utils/luatables.py | Python | apache-2.0 | 2,393 | 0.006686 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific la | nguage governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long,g-explicit-length-test
"""A convenience class replicating some lua table syntax with a python dict.
In general, should behave like a dictionary except that we can use dot notation
to access keys. Users should be careful to only provide keys suitable for
instance variable names.
Nota bene: do not use the key "keys" since it will collide with the method keys.
Usage example:
>>> t = T(a=5,b='kaw', c=T(v=[],x=33))
>>> t.a
5
>>> t.z = None
>>> print t
T(a=5, z=None, c=T(x=33, v=[]), b='kaw')
>>> t2 = T({'h':'f','x':4})
>>> t2
T(h='f', x=4)
>>> t2['x']
4
"""
class T(object):
  """Class for emulating lua tables.

  Behaves like a dict whose keys are also readable/writable as attributes.
  """

  def __init__(self, *args, **kwargs):
    # Accept either a single dict positionally, or keyword arguments — not both.
    if len(args) > 1 or (len(args) == 1 and len(kwargs) > 0):
      errmsg = '''constructor only allows a single dict as a positional
      argument or keyword arguments'''
      raise ValueError(errmsg)
    if len(args) == 1 and isinstance(args[0], dict):
      self.__dict__.update(args[0])
    else:
      self.__dict__.update(kwargs)

  def __repr__(self):
    # Simpler, equivalent formatting: key=repr(value) pairs in dict order.
    kwargstr = ', '.join('%s=%r' % kv for kv in self.__dict__.items())
    return 'T(' + kwargstr + ')'

  def __getitem__(self, key):
    return self.__dict__[key]

  def __setitem__(self, key, val):
    self.__dict__[key] = val

  def __delitem__(self, key):
    del self.__dict__[key]

  def __iter__(self):
    return iter(self.__dict__)

  def __len__(self):
    return len(self.__dict__)

  def keys(self):  # Needed for dict(T( ... )) to work.
    return self.__dict__.keys()

  def iteritems(self):
    # Fix: dict.iteritems() does not exist on Python 3; iter(items())
    # behaves equivalently on both major versions.
    return iter(self.__dict__.items())
|
globocom/database-as-a-service | dbaas/admin/templatetags/config_tags.py | Python | bsd-3-clause | 453 | 0 | from django import template
from system.models import Configuration
register = template.Library()
@register.assignment_tag
def get_config(conf_name=None):
    """Template tag: fetch the Configuration named *conf_name*.

    Returns a dict with name/value/description/hash, or None when no such
    configuration exists.  Raises ValueError if no name is supplied.
    """
    if conf_name is None:
        # ValueError subclasses Exception, so existing handlers keep working.
        raise ValueError("Invalid config name")
    c = Configuration.get_by_name_all_fields(conf_name)
    if not c:
        return None
    return {
        "name": c.name,
        "value": c.value,
        "description": c.description,
        "hash": c.hash
    }
|
class Caesar(str):
    """An implementation of the Caesar cipher."""

    def encipher(self, shift):
        """Shift every letter of the text by *shift* places (wrapping at 26);
        non-letters pass through unchanged.  Returns a new Caesar instance."""
        out = []
        for ch in self:
            if not ch.isalpha():
                out.append(ch)
                continue
            base = ord('a') if ch.islower() else ord('A')
            out.append(chr((ord(ch) - base + shift) % 26 + base))
        return Caesar(''.join(out))

    def decipher(self, shift):
        """Undo encipher(shift): shifting by the negated amount."""
        return self.encipher(-shift)
class Vigenere(str):
    """An implementation of the Vigenere cipher."""

    def _shift_by_key(self, key, sign):
        # Shift each letter by the current key letter's offset (times sign).
        # The key index advances only on letters; non-letters pass through.
        out = []
        pos = 0
        for ch in self:
            if not ch.isalpha():
                out.append(ch)
                continue
            base = ord('a') if ch.islower() else ord('A')
            offset = ord(key[pos % len(key)].lower()) - ord('a')
            out.append(chr((ord(ch) - base + sign * offset) % 26 + base))
            pos += 1
        return Vigenere(''.join(out))

    def encipher(self, key):
        """Encipher input (plaintext) with *key* and return the ciphertext."""
        return self._shift_by_key(key, 1)

    def decipher(self, key):
        """Decipher input (ciphertext) with *key* and return the plaintext."""
        return self._shift_by_key(key, -1)
class InputError(Exception):
    """This class is only used for throwing exceptions if the user supplies
    invalid input (e.g. ciphertext is an empty string)."""
    # NOTE(review): no raise sites are visible in this chunk; confirm it is
    # actually used before removing.
    pass
class VigCrack(Vigenere):
"""
VigCrack objects have methods to break Vigenere-encoded texts when the
original key is unknown.
    The technique used is based on the one described in:
http://www.stonehill.edu/compsci/Shai_papers/RSA.pdf
(pages 9-10)
Character frequencies taken from:
http://www.csm.astate.edu/~rossa/datasec/frequency.html (English)
http://www.characterfrequency.com/ (French, Italian, Portuguese, Spanish)
http://www.santacruzpl.org/readyref/files/g-l/ltfrqger.shtml (German)
"""
# Unless otherwise specified, test for codewords between (and including)
# these two lengths:
__default_min_codeword_length = 5
__default_max_codeword_length = 9
# The following are language-specific data on character frequencies.
# Kappa is the "index of coincidence" described in the cryptography paper
# (link above).
__english_data = {
'A':8.167, 'B':1.492, 'C':2.782, 'D':4.253, 'E':12.702,
'F':2.228, 'G':2.015, 'H':6.094, 'I':6.996, 'J':0.153,
'K':0.772, 'L':4.025, 'M':2.406, 'N':6.749, 'O':7.507,
'P':1.929, 'Q':0.095, 'R':5.987, 'S':6.327, 'T':9.056,
'U':2.758, 'V':0.978, 'W':2.360, 'X':0.150, 'Y':1.974,
'Z':0.074, 'max_val':12.702, 'kappa':0.0667
}
__french_data = {
'A':8.11, 'B':0.903, 'C':3.49, 'D':4.27, 'E':17.22,
'F':1.14, 'G':1.09, 'H':0.769, 'I':7.44, 'J':0.339,
'K':0.097, 'L':5.53, 'M':2.89, 'N':7.46, 'O':5.38,
'P':3.02, 'Q':0.999, 'R':7.05, 'S':8.04, 'T':6.99,
'U':5.65, 'V':1.30, 'W':0.039, 'X':0.435, 'Y':0.271,
'Z':0.098, 'max_val':17.22, 'kappa':0.0746
}
__german_data = {
'A':6.506, 'B':2.566, 'C':2.837, 'D':5.414, 'E':16.693,
'F':2.044, 'G':3.647, 'H':4.064, 'I':7.812, 'J':0.191,
'K':1.879, 'L':2.825, 'M':3.005, 'N':9.905, 'O':2.285,
'P':0.944, 'Q':0.055, 'R':6.539, 'S':6.765, 'T':6.742,
'U':3.703, 'V':1.069, 'W':1.396, 'X':0.022, 'Y':0.032,
'Z':1.002, 'max_val':16.693, 'kappa':0.0767
}
__italian_data = {
'A':11.30, 'B':0.975, 'C':4.35, 'D':3.80, 'E':11.24,
'F':1.09, 'G':1.73, 'H':1.02, 'I':11.57, 'J':0.035,
'K':0.078, 'L':6.40, 'M':2.66, 'N':7.29, 'O':9.11,
'P':2.89, 'Q':0.391, 'R':6.68, 'S':5.11, 'T':6.76,
'U':3.18, 'V':1.52, 'W':0.00, 'X':0.024, 'Y':0.048,
'Z':0.958, 'max_val':11.57, 'kappa':0.0733
}
__portuguese_data = {
'A':13.89, 'B':0.980, 'C':4.18, 'D':5.24, 'E':12.72,
'F':1.01, 'G':1.17, 'H':0.905, 'I':6.70, 'J':0.317,
'K':0.0174, 'L':2.76, 'M':4.54, 'N':5.37, 'O':10.90,
'P':2.74, 'Q':1.06, 'R':6.67, 'S':7.90, 'T':4.63,
'U':4.05, 'V':1.55, 'W':0.0104, 'X':0.272, 'Y':0.0165,
'Z':0.400, 'max_val':13.89, 'kappa':0.0745
}
__spanish_data = {
'A':12.09, 'B':1.21, 'C':4.20, 'D':4.65, 'E':13.89,
'F':0.642, 'G':1.11, 'H':1.13, 'I':6.38, 'J':0.461,
'K':0.038, 'L':5.19, 'M':2.86, 'N':7.23, 'O':9.58,
'P':2.74, 'Q':1.37, 'R':6.14, 'S':7.43, 'T':4.49,
'U':4.53, 'V':1.05, 'W':0.011, 'X':0.124, 'Y':1.14,
'Z':0.324, 'max_val':13.89, 'kappa':0.0766
}
# The default language is set to English.
__lang = 'EN'
__lang_data = __english_data
# This method sets the lang (__lang) attribute of a VigCrack object.
def set_language(self, language):
self.__lang = language.upper()
if self.__lang == 'DE':
self.__lang_data = self.__german_data
elif self.__lang == 'ES':
self.__lang_data = self.__spanish_data
elif self.__lang == 'FR':
self.__lang_data = self.__french_data
elif self.__lang == 'IT':
self.__lang_data = self.__italian_data
elif self.__lang == 'PT':
self.__lang_data = self.__portuguese_data
else:
self.__lang = 'EN'
return self
    # Rotate text n places to the right, wrapping around at the end.
    def __rotate_right(self, n):
        """Return the text rotated right by n positions (n may exceed len)."""
        cutting_point = len(self) - (n % len(self))
        return self[cutting_point:] + self[:cutting_point]
    # Get every nth char from a piece of text, from a given starting position.
    def __get_every_nth_char(self, start, n):
        """Return chars at positions start, start+n, start+2n, ... as a
        VigCrack carrying the same language setting."""
        accumulator = []
        for i in range(len(self)):
            if (i % n) == start:
                accumulator.append(self[i])
        return VigCrack(''.join(accumulator)).set_language(self.__lang)
    # Build a dictionary containing the number of occurrences of each char.
    def __count_char_freqs(self):
        """Map each uppercase letter of the text to its occurrence count."""
        dictionary = {}
        # Rebinding self is local only: count on an uppercased view.
        self = self.upper()
        for char in self:
            if char.isalpha():
                dictionary[char] = dictionary.get(char, 0) + 1
        return dictionary
    # Scale the dictionary so that it can be compared with __lang_data.
    def __scale(self, dictionary):
        """Scale counts in place so the maximum equals the language's
        'max_val' frequency; returns the same dictionary."""
        v = dictionary.values()
        v = sorted(v)
        max_val = v[-1]
        scaling_factor = self.__lang_data['max_val']/max_val
        # NOTE(review): v is reused as the loop variable below.
        for (k, v) in dictionary.items():
            dictionary[k] = v*scaling_factor
        return dictionary
# The residual error is the difference between a char's frequency in
# __lang_data and its frequency in the scaled dictionary from above.
# The error is then squared to remove a possible negative value.
def __sum_r |
skhal/performance | python/utility/timer.py | Python | mit | 901 | 0.00222 | #!/usr/bin/env python
'''
Created by Samvel Khalatyan, May 01, 2012
Copyright 2012, All rights reserved
'''
from __future__ import division
import time
class Timer(object):
    """Accumulating stopwatch.

    Each start()/stop() pair adds to a running total; calls() and elapsed()
    expose the number of completed cycles and the accumulated seconds.
    """

    def __init__(self):
        self._calls = 0      # completed start/stop cycles
        self._elapsed = 0    # accumulated seconds across all cycles
        self._start = None   # pending start timestamp, or None when idle

    def start(self):
        """Begin a cycle; redundant start() calls are ignored."""
        if self._start is None:
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # documented replacement for interval measurement.
            self._start = time.perf_counter()

    def stop(self):
        """End the current cycle and accumulate; ignored if not started."""
        if self._start is not None:
            self._elapsed += time.perf_counter() - self._start
            self._calls += 1
            self._start = None

    def calls(self):
        """Number of completed start/stop cycles."""
        return self._calls

    def elapsed(self):
        """Total accumulated seconds."""
        return self._elapsed

    def __str__(self):
        return "avg: {avg:.3f} mkS calls: {calls} total: {total:.6f} S".format(
            avg=self.elapsed() / self.calls() * 1e6 if self.calls() else 0,
            calls=self.calls(),
            total=self.elapsed())
|
google/compare-codecs | lib/file_codec.py | Python | apache-2.0 | 7,774 | 0.007718 | #!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class for all codecs using encode-to-file."""
import encoder
import filecmp
import json
import os
import re
import subprocess
class FileCodec(encoder.Codec):
"""Base class for file-using codecs.
Subclasses MUST define:
- EncodeCommandLine
- DecodeCommandLine
- ResultData
"""
  def __init__(self, name, formatter=None):
    super(FileCodec, self).__init__(name, formatter=formatter)
    # Sentinel value: concrete subclasses must overwrite this with the
    # real file extension for their encoded output.
    self.extension = 'must-have-extension'
  def _EncodeFile(self, parameters, bitrate, videofile, encodedfile):
    """Run the encoder command line; returns (cpu_seconds, wall_seconds).

    Raises Exception if the encoder exits nonzero.
    """
    commandline = self.EncodeCommandLine(
        parameters, bitrate, videofile, encodedfile)
    print commandline
    with open(os.path.devnull, 'r') as nullinput:
      # os.times(): index 2 is children's user CPU time, index 4 wall clock.
      times_start = os.times()
      returncode = subprocess.call(commandline, shell=True, stdin=nullinput)
      times_end = os.times()
      subprocess_cpu = times_end[2] - times_start[2]
      elapsed_clock = times_end[4] - times_start[4]
      print "Encode took %f CPU seconds %f clock seconds" % (
          subprocess_cpu, elapsed_clock)
      if returncode:
        raise Exception("Encode failed with returncode %d" % returncode)
      return (subprocess_cpu, elapsed_clock)
def _DecodeFile(self, videofile, encodedfile, workdir):
tempyuvfile = os.path.join(workdir,
videofile.basename + 'tempyuvfile.yuv')
if os.path.isfile(tempyuvfile):
print "Removing tempfile before decode:", tempyuvfile
os.unlink(tempyuvfile)
commandline = self.DecodeCommandLine(videofile, encodedfile, tempyuvfile)
print commandline
with open(os.path.devnull, 'r') as nullinput:
subprocess_cpu_start = os.times()[2]
returncode = subprocess.call(commandline, shell=True,
stdin=nullinput)
if returncode:
raise Exception('Decode failed with returncode %d' % returncode)
subprocess_cpu = os.times()[2] - subprocess_cpu_start
print "Decode took %f seconds" % subprocess_cpu
commandline = encoder.Tool("psnr") + " %s %s %d %d 9999" % (
videofile.filename, tempyuvfile, videofile.width,
videofile.height)
print commandline
psnr = subprocess.check_output(commandline, shell=True, stdin=nullinput)
commandline = ['md5sum', tempyuvfile]
md5 = subprocess.check_output(commandline, shell=False)
yuv_md5 = md5.split(' ')[0]
os.unlink(tempyuvfile)
return psnr, subprocess_cpu, yuv_md5
def Execute(self, parameters, bitrate, videofile, workdir):
encodedfile = os.path.join(workdir,
'%s.%s' % (videofile.basename, self.extension))
subprocess_cpu, elapsed_clock = self._EncodeFile(parameters, bitrate,
videofile, encodedfile)
result = {}
result['encode_cputime'] = subprocess_cpu
result['encode_clocktime'] = elapsed_clock
result['encoder_version'] = self.EncoderVersion()
bitrate = videofile.MeasuredBitrate(os.path.getsize(encodedfile))
psnr, decode_cputime, yuv_md5 = self._DecodeFile(
videofile, encodedfile, workdir)
result['decode_cputime'] = decode_cputime
result['yuv_md5'] = yuv_md5
print "Bitrate", bitrate, "PSNR", psnr
result['bitrate'] = int(bitrate)
result['psnr'] = float(psnr)
result['cliptime'] = videofile.ClipTime()
result.update(self.ResultData(encodedfile))
return result
# Below are the fallback implementations of the interfaces
# that the subclasses have to implement.
def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
"""This function returns the command line that should be executed
in order to turn an YUV file into an encoded file."""
# pylint: disable=W0613,R0201
raise encoder.Error('EncodeCommandLine not | defined')
def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
"""This function returns the command line that should be executed
in order to turn an en | coded file into an YUV file."""
# pylint: disable=W0613,R0201
raise encoder.Error('DecodeCommandLine not defined')
def ResultData(self, encodedfile):
"""Returns additional fields that the codec may know how to generate."""
# pylint: disable=W0613,R0201
return {}
def VerifyEncode(self, parameters, bitrate, videofile, workdir):
"""Returns true if a new encode of the file gives exactly the same file."""
old_encoded_file = '%s/%s.%s' % (workdir, videofile.basename,
self.extension)
if not os.path.isfile(old_encoded_file):
raise encoder.Error('Old encoded file missing: %s' % old_encoded_file)
new_encoded_file = '%s/%s_verify.%s' % (workdir, videofile.basename,
self.extension)
self._EncodeFile(parameters, bitrate, videofile,
new_encoded_file)
if not VideoFilesEqual(old_encoded_file, new_encoded_file, self.extension):
# If there is a difference, we leave the new encoded file so that
# they can be compared by hand if desired.
return False
os.unlink(new_encoded_file)
return True
def EncoderVersion(self):
raise encoder.Error('File codecs must define their own version')
# Tools that may be called upon by the codec implementation if needed.
def MatroskaFrameInfo(encodedfile):
# Run the mkvinfo tool across the file to get frame size info.
commandline = 'mkvinfo -v %s' % encodedfile
print commandline
mkvinfo = subprocess.check_output(commandline, shell=True)
frameinfo = []
for line in mkvinfo.splitlines():
match = re.search(r'Frame with size (\d+)', line)
if match:
# The mkvinfo tool gives frame size in bytes. We want bits.
frameinfo.append({'size': int(match.group(1))*8})
return frameinfo
def FfmpegFrameInfo(encodedfile):
  # Uses the ffprobe tool to give frame info.
  # Returns one {'size': bits} dict per frame, derived from the byte offsets
  # (pkt_pos) of consecutive packets; the final frame's size is computed
  # against the total file size.
  commandline = '%s -loglevel warning -show_frames -of json %s' % (
    encoder.Tool('ffprobe'), encodedfile)
  ffprobeinfo = subprocess.check_output(commandline, shell=True)
  probeinfo = json.loads(ffprobeinfo)
  previous_position = 0
  frameinfo = []
  for frame in probeinfo['frames']:
    current_position = int(frame['pkt_pos'])
    # NOTE(review): a first packet located exactly at offset 0 is skipped by
    # this check, so its size would be folded into the next delta - confirm
    # the container always places the first packet after a header.
    if previous_position != 0:
      frameinfo.append({'size': 8 * (current_position - previous_position)})
    previous_position = current_position
  frameinfo.append({'size': 8 *
                    (os.path.getsize(encodedfile) - previous_position)})
  return frameinfo
def VideoFilesEqual(old_encoded_file, new_encoded_file, extension):
  """Return True if the two encoded files carry identical video content."""
  if extension != 'webm':
    # For other containers a plain byte-for-byte comparison is sufficient.
    return filecmp.cmp(old_encoded_file, new_encoded_file)
  # Matroska files contain UIDs that vary even if the video content
  # is the same, so compare vpxdec --md5 checksums of the streams instead.
  checksums = [subprocess.check_output((encoder.Tool('vpxdec'),
                                        '--md5',
                                        filename))
               for filename in (old_encoded_file, new_encoded_file)]
  return checksums[0] == checksums[1]
|
pbfy0/visvis.dev | functions/colorbar.py | Python | bsd-3-clause | 531 | 0.013183 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
def colorbar(axes=None):
    """ colorbar(axes=None)
    Attach a colorbar to the given axes (or the current axes if
    not given). The reference to the colorbar instance is returned.
    Also see the vv.ColormapEditor wibject.
    """
    # Default to the current axes when none is supplied.
    if axes is None:
        axes = vv.gca()
    return vv.Colorbar(axes)
|
tomspur/shedskin | examples/mwmatching.py | Python | gpl-3.0 | 32,668 | 0.003459 | """Weighted maximum matching in general graphs.
The algorithm is taken from "Efficient Algorithms for Finding Maximum
Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
It is based on the "blossom" method for finding augmenting paths and
the "primal-dual" method for finding a matching of maximum weight, both
due to Jack Edmonds.
Some ideas came from "Implementation of algorithms for maximum matching
on non-bipartite graphs" by H.J. Gabow, Standford Ph.D. thesis, 1973.
A C program for maximum weight matching by Ed Rothberg was used extensively
to validate this new code.
Copyright 2009 Joris van Rantwijk, license GPL2 or later
"""
# If assigned, DEBUG(str) is called with lots of debug messages.
from sys import stderr
def debug(s):
    """Emit one debug line on stderr (enabled by assigning DEBUG = debug)."""
    stderr.write('DEBUG: %s\n' % (s,))
DEBUG = debug
DEBUG = None
# Check delta2/delta3 computation after every substage;
# only works on integer weights, slows down the algorithm to O(n^4).
CHECK_DELTA = False
# Check optimality of solution before returning; only works on integer weights.
CHECK_OPTIMUM = True
# read input
input = file('testdata/bench_mwmatching_2039_250_a.gr')
s = input.next().split()
#s = raw_input().split()
assert s[0] == 'p' and s[1] == 'edge'
edges = []
for i in xrange(int(s[3])):
#s = raw_input().split()
s = input.next().split()
assert len(s) == 4 and s[0] == 'e'
edges.append((int(s[1]), int(s[2]), int(s[3])))
maxcardinality = True
"""Compute a maximum-weighted matching in the general undirected
weighted graph given by "edges". If "maxcardinality" is true,
only maximum-cardinality matchings are considered as solutions.
Edges is a sequence of tuples (i, j, wt) describing an undirected
edge between vertex i and vertex j with weight wt. There is at most
one edge between any two vertices; no vertex has an edge to itself.
Vertices are identified by consecutive, non-negative integers.
Return a list "mate", such that mate[i] == j if vertex i is
matched to vertex j, and mate[i] == -1 if vertex i is not matched.
This function takes time O(n ** 3)."""
#
# Vertices are numbered 0 .. (nvertex-1).
# Non-trivial blossoms are numbered nvertex .. (2*nvertex-1)
#
# Edges are numbered 0 .. (nedge-1).
# Edge endpoints are numbered 0 .. (2*nedge-1), such that endpoints
# (2*k) and (2*k+1) both belong to edge k.
#
# Many terms used in the comments (sub-blossom, T-vertex) come from
# the paper by Galil; read the paper before reading this code.
#
# Count vertices.
nedge = len(edges)
nvertex = 0
for (i, j, w) in edges:
assert i >= 0 and j >= 0 and i != j
if i >= nvertex:
nvertex = i + 1
if j >= nvertex:
nvertex = j + 1
# Find the maximum edge weight.
maxweight = max(0, max([ wt for (i, j, wt) in edges ]))
# If p is an edge endpoint,
# endpoint[p] is the vertex to which endpoint p is attached.
# Not modified by the algorithm.
endpoint = [ edges[p//2][p%2] for p in xrange(2*nedge) ]
# If v is a vertex,
# neighbend[v] is the list of remote endpoints of the edges attached to v.
# Not modified by the algorithm.
neighbend = [ [ ] for i in xrange(nvertex) ]
for k in xrange(len(edges)):
(i, j, w) = edges[k]
neighbend[i].append(2*k+1)
neighbend[j].append(2*k)
# If v is a vertex,
# mate[v] is the remote endpoint of its matched edge, or -1 if it is single
# (i.e. endpoint[mate[v]] is v's partner vertex).
# Initially all vertices are single; updated during augmentation.
mate = nvertex * [ -1 ]
# If b is a top-level blossom,
# label[b] is 0 if b is unlabeled (free);
# 1 if b is an S-vertex/blossom;
# 2 if b is a T-vertex/blossom.
# The label of a vertex is found by looking at the label of its
# top-level containing blossom.
# If v is a vertex inside a T-blossom,
# label[v] is 2 iff v is reachable from an S-vertex outside the blossom.
# Labels are assigned during a stage and reset after each augmentation.
label = (2 * nvertex) * [ 0 ]
# If b is a labeled top-level blossom,
# labelend[b] is the remote endpoint of the edge through which b obtained
# its label, or -1 if b's base vertex is single.
# If v is a vertex inside a T-blossom and label[v] == 2,
#   labelend[v] is the remote endpoint of the edge through which v is
# reachable from outside the blossom.
labelend = (2 * nvertex) * [ -1 ]
# If v is a vertex,
# inblossom[v] is the top-level blossom to which v belongs.
# If v is a top-level vertex, v is itself a blossom (a trivial blossom)
# and inblossom[v] == v.
# Initially all vertices are top-level trivial blossoms.
inblossom = range(nvertex)
# If b is a sub-blossom,
# blossomparent[b] is its immediate parent (sub-)blossom.
# If b is a top-level blossom, blossomparent[b] is -1.
blossomparent = (2 * nvertex) * [ -1 ]
# If b is a non-trivial (sub-)blossom,
# blossomchilds[b] is an ordered list of its sub-blossoms, starting with
# the base and going round the blossom.
blossomchilds = (2 * nvertex) * [ None ]
# If b is a (sub-)blossom,
# blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
blossombase = range(nvertex) + nvertex * [ -1 ]
# If b is a non-trivial (sub-)blossom,
# blossomendps[b] is a list of endpoints on its connecting edges,
# such that blossomendps[b][i] is the local endpoint of blossomchilds[b][i]
# on the edge that connects it to blossomchilds[b][wrap(i+1)].
blossomendps = (2 * nvertex) * [ None ]
# If v is a free vertex (or an unreached vertex inside a T-blossom),
# bestedge[v] is the edge to an S-vertex with least slack,
# or -1 if there is no such edge.
# If b is a (possibly trivial) top-level S-blossom,
# bestedge[b] is the least-slack edge to a different S-blossom,
# or -1 if there is no such edge.
# This is used for efficient computation of delta2 and delta3.
bestedge = (2 * nvertex) * [ -1 ]
# If b is a non-trivial top-level S-blossom,
# blossombestedges[b] is a list of least-slack edges to neighbouring
# S-blossoms, or None if no such list has been computed yet.
# This is used for efficient computation of delta3.
blossombestedges = (2 * nvertex) * [ None ]
# List of currently unused blossom numbers.
unusedblossoms = range(nvertex, 2*nvertex)
# If v is a vertex,
# dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
# optimization problem (multiplication by two ensures integer values
# throughout the algorithm if all edge weights are integers).
# If b is a non-trivial blossom,
# dualvar[b] = z(b) where z(b) is b's variable in the dual optimization
# problem.
dualvar = nvertex * [ maxweight ] + nvertex * [ 0 ]
# If allowedge[k] is true, edge k has zero slack in the optimization
# problem; if allowedge[k] is false, the edge's slack may or may not
# be zero.
allowedge = nedge * [ False ]
# Queue of newly discovered S-vertices.
queue = [ ]
# Return 2 * slack of edge k (does not work inside blossoms).
def slack(k):
    """Return 2 * slack of edge k (does not work inside blossoms)."""
    endpoint_i, endpoint_j, weight = edges[k]
    return dualvar[endpoint_i] + dualvar[endpoint_j] - 2 * weight
# Generate the leaf vertices of a blossom.
def blossomLeaves(b):
    """Generate the leaf vertices contained in (sub-)blossom b."""
    if b < nvertex:
        # A plain vertex is its own single leaf.
        yield b
    else:
        # Recurse into every child; trivial children simply yield themselves.
        for child in blossomchilds[b]:
            for leaf in blossomLeaves(child):
                yield leaf
# Assign label t to the top-level blossom containing vertex w
# and record the fact that w was reached through the edge with
# remote endpoint p.
def assignLabel(w, t, p):
    """Assign label t (1 = S, 2 = T) to the top-level blossom containing w.

    p is the remote endpoint of the edge through which w was reached
    (-1 if w's base vertex is single).  Labeling an S-blossom queues its
    leaf vertices for scanning; labeling a T-blossom immediately labels
    the mate of its base vertex as S.
    """
    if DEBUG: DEBUG('assignLabel(%d,%d,%d)' % (w, t, p))
    b = inblossom[w]
    # Both the vertex and its containing blossom must still be unlabeled.
    assert label[w] == 0 and label[b] == 0
    label[w] = label[b] = t
    labelend[w] = labelend[b] = p
    bestedge[w] = bestedge[b] = -1
    if t == 1:
        # b became an S-vertex/blossom; add it(s vertices) to the queue.
        queue.extend(blossomLeaves(b))
        if DEBUG: DEBUG('PUSH ' + str(list(blossomLeaves(b))))
    elif t == 2:
        # b became a T-vertex/blossom; assign label S to its mate.
        # (If b is a non-trivial blossom, its base is the only vertex
        # with an external mate.)
        base = blossombase[b]
        assert mate[base] >= 0
        assignLabel(endpoint[mate[base]], 1, mate[base] ^ 1)
# Trace back from vertices v and w to discover either a new blossom
# or an augmenting path. Return the base vertex of th |
hyperNURb/ggrc-core | src/ggrc/models/response.py | Python | apache-2.0 | 5,096 | 0.009027 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from ggrc.models.mixins import (
deferred, Noted, Described, Hyperlinked, WithContact, Titled, Slugged,
)
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.request import Request
class Response(Noted, Described, Hyperlinked, WithContact,
               Titled, Slugged, db.Model):
  """Base model for responses to audit requests.

  The concrete response flavours (documentation, interview, population
  sample) share this single table; `response_type` is the polymorphic
  discriminator column.
  """
  __tablename__ = 'responses'
  __mapper_args__ = {
      'polymorphic_on': 'response_type',
  }
  _title_uniqueness = False
  _slug_uniqueness = False

  # Override `Titled.title` to provide default=""
  title = deferred(
      db.Column(db.String, nullable=False, default=""), 'Response')

  VALID_STATES = (u'Assigned', u'Submitted', u'Accepted', u'Rejected')
  VALID_TYPES = (u'documentation', u'interview', u'population sample')
  request_id = deferred(
      db.Column(db.Integer, db.ForeignKey('requests.id'), nullable=False),
      'Response')
  response_type = db.Column(db.Enum(*VALID_TYPES), nullable=False)
  status = deferred(db.Column(db.String, nullable=False), 'Response')

  # The columns below are only meaningful for PopulationSampleResponse but
  # live here because all response types share one table.
  population_worksheet_id = deferred(
      db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
      'Response')
  population_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
  sample_worksheet_id = deferred(
      db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
      'Response')
  sample_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
  sample_evidence_id = deferred(
      db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
      'Response')

  population_worksheet = db.relationship(
      "Document",
      foreign_keys="PopulationSampleResponse.population_worksheet_id"
  )
  sample_worksheet = db.relationship(
      "Document",
      foreign_keys="PopulationSampleResponse.sample_worksheet_id"
  )
  sample_evidence = db.relationship(
      "Document",
      foreign_keys="PopulationSampleResponse.sample_evidence_id"
  )

  @staticmethod
  def _extra_table_args(cls):
    # Indexes for the document foreign keys used by population samples.
    return (
        db.Index('population_worksheet_document', 'population_worksheet_id'),
        db.Index('sample_evidence_document', 'sample_evidence_id'),
        db.Index('sample_worksheet_document', 'sample_worksheet_id'),
    )

  _publish_attrs = [
      'request',
      'status',
      'response_type',
  ]
  _sanitize_html = [
      'description',
  ]
  _aliases = {
      "description": "Response",
      "request": {
          "display_name": "Request",
          "mandatory": True,
          "filter_by": "_filter_by_request",
      },
      "response_type": {
          "display_name": "Response Type",
          "mandatory": True,
      },
      "status": "Status",
      "title": None,
      "secondary_contact": None,
      "notes": None,
  }

  def _display_name(self):
    """Human-readable identifier used in logs and the UI."""
    return u'Response with id={0} for Audit "{1}"'.format(
        self.id, self.request.audit.display_name)

  @classmethod
  def _filter_by_request(cls, predicate):
    """Build an EXISTS clause matching responses whose request slug
    satisfies `predicate` (used by import/export filtering)."""
    return Request.query.filter(
        (Request.id == cls.request_id) &
        predicate(Request.slug)
    ).exists()

  @classmethod
  def eager_query(cls):
    """Extend the base eager query to pre-join the owning request."""
    from sqlalchemy import orm
    query = super(Response, cls).eager_query()
    return query.options(
        orm.joinedload('request'))
class DocumentationResponse(Relatable, Documentable, Personable, Response):
  """Response satisfied by attaching documentation to the request."""
  __mapper_args__ = {
      'polymorphic_identity': 'documentation'
  }
  _table_plural = 'documentation_responses'
  # No extra published or sanitized attributes beyond the base Response.
  _publish_attrs = []
  _sanitize_html = []
class InterviewResponse(Relatable, Documentable, Personable, Response):
  """Response backed by one or more interview meetings."""
  __mapper_args__ = {
      'polymorphic_identity': 'interview'
  }
  _table_plural = 'interview_responses'
  # Meetings are owned by the response and are deleted together with it.
  meetings = db.relationship(
      'Meeting',
      backref='response',
      cascade='all, delete-orphan'
  )
  _publish_attrs = [
      'meetings',
  ]
  _sanitize_html = []
  @classmethod
  def eager_query(cls):
    """Pre-load the related meetings to avoid N+1 queries."""
    from sqlalchemy import orm
    query = super(InterviewResponse, cls).eager_query()
    return query.options(
        orm.subqueryload('meetings'))
class PopulationSampleResponse(Relatable, Documentable, Personable, Response):
  """Response backed by population/sample worksheets and evidence documents."""
  __mapper_args__ = {
      'polymorphic_identity': 'population sample'
  }
  _table_plural = 'population_sample_responses'
  _publish_attrs = [
      'population_worksheet',
      'population_count',
      'sample_worksheet',
      'sample_count',
      'sample_evidence',
  ]
  # NOTE(review): these map to Integer columns on Response; listing them
  # for HTML sanitization looks unnecessary - confirm whether it can go.
  _sanitize_html = [
      'population_count',
      'sample_count',
  ]
  @classmethod
  def eager_query(cls):
    """Pre-join the worksheet/evidence documents used by this response."""
    from sqlalchemy import orm
    query = super(PopulationSampleResponse, cls).eager_query()
    return query.options(
        orm.joinedload('population_worksheet'),
        orm.joinedload('sample_worksheet'),
        orm.joinedload('sample_evidence'))
|
daniele-athome/kontalk-legacy-xmppserver | kontalk/xmppserver/component/c2s/handlers.py | Python | gpl-3.0 | 16,001 | 0.004 | # -*- coding: utf-8 -*-
"""c2s protocol handlers."""
"""
Kontalk XMPP server
Copyright (C) 2014 Kontalk Devteam <devteam@kontalk.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import base64
import traceback
from twisted.words.protocols.jabber import xmlstream, jid
from twisted.words.protocols.jabber.xmlstream import XMPPHandler
from twisted.words.xish import domish
from wokkel import component
from kontalk.xmppserver import log, util, xmlstream2
class InitialPresenceHandler(XMPPHandler):
"""
Handle presence stanzas and client disconnection.
@type parent: L{C2SManager}
"""
    def connectionInitialized(self):
        """Subscribe to initial (type-less) presence stanzas addressed to this component."""
        self.xmlstream.addObserver("/presence[not(@type)][@to='%s']" % (self.xmlstream.thisEntity.full(), ), self.presence)
def send_presence(self, to):
"""
Sends all local presence data (available and unavailable) to the given
entity.
"""
def _db(presence, to):
from copy import deepcopy
log.debug("presence: %r" % (presence, ))
if type(presence) == list and len(presence) > 0:
for user in presence:
response_from = util.userid_to_jid(user['userid'], self.parent.xmlstream.thisEntity.host).full()
num_avail = 0
try:
streams = self.parent.sfactory.streams[user['userid']]
for x in streams. | itervalues():
presence = x._presence
if presence and not presence.hasAttribute('type'):
response = domish.Element((None, 'presence'))
response['to'] = to
response['from'] = presence['from']
# copy stuff
for child in ('status', 'show', 'priority'):
| e = getattr(presence, child)
if e:
response.addChild(deepcopy(e))
self.send(response)
num_avail += 1
except KeyError:
pass
# no available resources - send unavailable presence
if not num_avail:
response = domish.Element((None, 'presence'))
response['to'] = to
response['from'] = response_from
if user['status'] is not None:
response.addElement((None, 'status'), content=user['status'])
if user['show'] is not None:
response.addElement((None, 'show'), content=user['show'])
response['type'] = 'unavailable'
delay = domish.Element(('urn:xmpp:delay', 'delay'))
delay['stamp'] = user['timestamp'].strftime(xmlstream2.XMPP_STAMP_FORMAT)
response.addChild(delay)
self.send(response)
if self.parent.logTraffic:
log.debug("presence sent: %s" % (response.toXml().encode('utf-8'), ))
else:
log.debug("presence sent: %s" % (response['from'], ))
# send vcard
iq_vcard = domish.Element((None, 'iq'))
iq_vcard['type'] = 'set'
iq_vcard['from'] = response_from
iq_vcard['to'] = to
# add vcard
vcard = iq_vcard.addElement((xmlstream2.NS_XMPP_VCARD4, 'vcard'))
if user['fingerprint']:
pub_key = self.parent.keyring.get_key(user['userid'], user['fingerprint'])
if pub_key:
vcard_key = vcard.addElement((None, 'key'))
vcard_data = vcard_key.addElement((None, 'uri'))
vcard_data.addContent("data:application/pgp-keys;base64," + base64.b64encode(pub_key))
self.send(iq_vcard)
if self.parent.logTraffic:
log.debug("vCard sent: %s" % (iq_vcard.toXml().encode('utf-8'), ))
else:
log.debug("vCard sent: %s" % (iq_vcard['from'], ))
d = self.parent.presencedb.get_all()
d.addCallback(_db, to)
def presence(self, stanza):
"""
This initial presence is from a broadcast sent by external entities
(e.g. not the sm); sm wouldn't see it because it has no observer.
Here we are sending offline messages directly to the connected user.
"""
log.debug("initial presence from router by %s" % (stanza['from'], ))
try:
# receiving initial presence from remote c2s, send all presence data
unused, host = util.jid_component(stanza['from'], util.COMPONENT_C2S)
if host != self.parent.servername and host in self.parent.keyring.hostlist():
log.debug("remote c2s appeared, sending all local presence and vCards to %s" % (stanza['from'], ))
self.send_presence(stanza['from'])
except:
pass
sender = jid.JID(stanza['from'])
# check for external conflict
self.parent.sfactory.check_conflict(sender)
if sender.user:
try:
unused, host = util.jid_component(sender.host, util.COMPONENT_C2S)
# initial presence from a client connected to another server, clear it from our presence table
if host != self.parent.servername and host in self.parent.keyring.hostlist():
log.debug("deleting %s from presence table" % (sender.user, ))
self.parent.presencedb.delete(sender.user)
except:
pass
# initial presence - deliver offline storage
def output(data, user):
log.debug("data: %r" % (data, ))
to = user.full()
for msg in data:
log.debug("msg[%s]=%s" % (msg['id'], msg['stanza'].toXml().encode('utf-8'), ))
try:
"""
Mark the stanza with our server name, so we'll receive a
copy of the receipt
"""
if msg['stanza'].request:
msg['stanza'].request['from'] = self.xmlstream.thisEntity.full()
elif msg['stanza'].received:
msg['stanza'].received['from'] = self.xmlstream.thisEntity.full()
# mark delayed delivery
if 'timestamp' in msg:
delay = msg['stanza'].addElement((xmlstream2.NS_XMPP_DELAY, 'delay'))
delay['stamp'] = msg['timestamp'].strftime(xmlstream2.XMPP_STAMP_FORMAT)
msg['to'] = to
self.send(msg['stanza'])
"""
If a receipt is requested, we won't delete the message from
storage now; we must be sure client has received it.
Otherwise just delete the message immediately.
"""
if not xmlstream2.extract_receipt(msg['stanza'], 'request') and \
not xmlstream2.extract_receipt(stanza, 'received'):
|
eLRuLL/scrapy | tests/test_utils_request.py | Python | bsd-3-clause | 4,301 | 0.003953 | import unittest
from scrapy.http import Request
from scrapy.utils.request import request_fingerprint, _fingerprint_cache, \
request_authenticate, request_httprepr
class UtilsRequestTest(unittest.TestCase):
    """Tests for the scrapy.utils.request helpers."""

    def test_request_fingerprint(self):
        # Query-string argument order must not affect the fingerprint.
        r1 = Request("http://www.example.com/query?id=111&cat=222")
        r2 = Request("http://www.example.com/query?cat=222&id=111")
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r1))
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        # Different query values must yield different fingerprints.
        r1 = Request('http://www.example.com/hnnoticiaj1.aspx?78132,199')
        r2 = Request('http://www.example.com/hnnoticiaj1.aspx?78160,199')
        self.assertNotEqual(request_fingerprint(r1), request_fingerprint(r2))
        # make sure caching is working
        self.assertEqual(request_fingerprint(r1), _fingerprint_cache[r1][(None, False)])
        # Headers are ignored by default...
        r1 = Request("http://www.example.com/members/offers.html")
        r2 = Request("http://www.example.com/members/offers.html")
        r2.headers['SESSIONID'] = b"somehash"
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        r1 = Request("http://www.example.com/")
        r2 = Request("http://www.example.com/")
        r2.headers['Accept-Language'] = b'en'
        r3 = Request("http://www.example.com/")
        r3.headers['Accept-Language'] = b'en'
        r3.headers['SESSIONID'] = b"somehash"
        # Previously written as assertEqual(a, b, c), which passed the third
        # fingerprint as the assertion *message*; split into two comparisons.
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r3))
        # ...unless explicitly included via include_headers.
        self.assertEqual(request_fingerprint(r1),
                         request_fingerprint(r1, include_headers=['Accept-Language']))
        self.assertNotEqual(request_fingerprint(r1),
                            request_fingerprint(r2, include_headers=['Accept-Language']))
        self.assertEqual(request_fingerprint(r3, include_headers=['accept-language', 'sessionid']),
                         request_fingerprint(r3, include_headers=['SESSIONID', 'Accept-Language']))
        # URL fragments are stripped unless keep_fragments=True.
        r1 = Request("http://www.example.com/test.html")
        r2 = Request("http://www.example.com/test.html#fragment")
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r1, keep_fragments=True))
        self.assertNotEqual(request_fingerprint(r2), request_fingerprint(r2, keep_fragments=True))
        self.assertNotEqual(request_fingerprint(r1), request_fingerprint(r2, keep_fragments=True))
        # HTTP method and body are part of the fingerprint.
        r1 = Request("http://www.example.com")
        r2 = Request("http://www.example.com", method='POST')
        r3 = Request("http://www.example.com", method='POST', body=b'request body')
        self.assertNotEqual(request_fingerprint(r1), request_fingerprint(r2))
        self.assertNotEqual(request_fingerprint(r2), request_fingerprint(r3))
        # cached fingerprint must be cleared on request copy
        r1 = Request("http://www.example.com")
        fp1 = request_fingerprint(r1)
        r2 = r1.replace(url="http://www.example.com/other")
        fp2 = request_fingerprint(r2)
        self.assertNotEqual(fp1, fp2)

    def test_request_authenticate(self):
        """request_authenticate must attach a Basic auth header."""
        r = Request("http://www.example.com")
        request_authenticate(r, 'someuser', 'somepass')
        self.assertEqual(r.headers['Authorization'], b'Basic c29tZXVzZXI6c29tZXBhc3M=')

    def test_request_httprepr(self):
        """request_httprepr must render the raw HTTP/1.1 request bytes."""
        r1 = Request("http://www.example.com")
        self.assertEqual(request_httprepr(r1), b'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n')
        r1 = Request("http://www.example.com/some/page.html?arg=1")
        self.assertEqual(request_httprepr(r1), b'GET /some/page.html?arg=1 HTTP/1.1\r\nHost: www.example.com\r\n\r\n')
        r1 = Request("http://www.example.com", method='POST', headers={"Content-type": b"text/html"}, body=b"Some body")
        self.assertEqual(request_httprepr(r1), b'POST / HTTP/1.1\r\nHost: www.example.com\r\nContent-Type: text/html\r\n\r\nSome body')

    def test_request_httprepr_for_non_http_request(self):
        # the representation is not important but it must not fail.
        request_httprepr(Request("file:///tmp/foo.txt"))
        request_httprepr(Request("ftp://localhost/tmp/foo.txt"))
request_httprepr(Request("ftp://localhost/tmp/foo.txt"))
if __name__ == "__main__":
    # Support running this test module directly: python test_utils_request.py
    unittest.main()
|
aroth-arsoft/arsoft-meta-packages | grp_dovecot.py | Python | gpl-3.0 | 1,242 | 0.045089 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
# Package group definitions for the Dovecot mail server.  Each entry names an
# installable flavour: its group name, short/long descriptions, the Debian
# packages it installs and, optionally, the groups it depends on.
dovecot = [
    {'name':'common',
     'mainpackage':True,
     'shortdesc':'Installs the latest version of the Dovecot mail server',
     'description':'',
     'packages':['dovecot-core', 'dovecot-antispam', 'dovecot-imapd', 'dovecot-pop3d',
                 'dovecot-gssapi', 'dovecot-lmtpd', 'dovecot-managesieved', 'dovecot-solr' ]
    },
    {'name':'mysql',
     'shortdesc':'Installs the latest version of the Dovecot mail server with MySQL support',
     'description':'',
     'depends':['common'],
     'packages':['dovecot-mysql'],
    },
    {'name':'pgsql',
     'shortdesc':'Installs the latest version of the Dovecot mail server with PostgreSQL support',
     'description':'',
     'depends':['common'],
     'packages':['dovecot-pgsql'],
    },
    {'name':'sqlite',
     'shortdesc':'Installs the latest version of the Dovecot mail server with SQLite support',
     'description':'',
     'depends':['common'],
     'packages':['dovecot-sqlite'],
    },
    {'name':'none',
     'shortdesc':'Uninstalls all versions of the Dovecot mail server',
     'description':'',
     'packages':[],
     'noconflicts':[]
    },
    ]
|
JhooClan/QLibCJ | qalg.py | Python | mit | 9,431 | 0.023976 | from qlibcj import *
def DJAlg(size, U_f, **kwargs):
    """Deutsch-Jozsa algorithm.

    U_f is the oracle over qubits x1..xn and y; after applying it, qubit y
    must hold f(x1..xn) XOR y.  `size` is n + 1, where n is the number of
    input bits of f.  Pass seed=<seed> for a repeatable run.  Returns the
    measurement of x1..xn: all zeros iff f is constant.
    """
    rnd.seed(kwargs.get('seed', None))  # fix the seed so the experiment is repeatable
    r = QRegistry(([0 for i in range(size - 1)] + [1]))  # x1..xn start at |0>, y at |1>
    r.applyGate(H(size))  # Hadamard on every qubit
    r.applyGate(U_f)  # apply the oracle
    r.applyGate(H(size - 1), I(1))  # Hadamard on every qubit except y
    return r.measure([1 for i in range(size - 1)] + [0])  # measure the x qubits; 0 => f is constant
def ExampleDJCircuit(size, U_f, **kwargs):
    """Build and run the Deutsch-Jozsa circuit, printing True iff f is constant."""
    rnd.seed(kwargs.get('seed', None))  # fix the seed so the experiment is repeatable
    c = DJAlgCircuit(size, U_f, save=kwargs.get('save', True))
    res = c.execute([0 for i in range(size - 1)])  # x1..xn start at |0>; the circuit itself sets the ancilla y to |1>
    print(all(i == 0 for i in res[1][0][:-1]))  # True iff every measured x qubit is 0, i.e. f is constant
    return res
def DJAlgCircuit(size, U_f, save=True):
    """Return a QCircuit implementing the Deutsch-Jozsa algorithm.

    U_f is the oracle over x1..xn and y (y <- f(x) XOR y); `size` is n + 1
    where n is the number of input bits of f.  The last qubit is an ancilla
    that the circuit initializes to |1>.
    """
    c = QCircuit("Deutsch-Josza Algorithm", save=save, ancilla=[1])  # last qubit is the ancilla, set to |1>
    c.addLine(H(size))  # Hadamard on every qubit
    c.addLine(U_f)  # apply the oracle
    c.addLine(H(size - 1), I(1))  # Hadamard on every qubit except the ancilla
    # f = lambda _, l: print(all(i == 0 for i in l[:-1]))  # would print True after the measurement iff f is constant
    # c.addLine(Measure([1 for i in range(size - 1)] + [0], tasks=[f]))
    c.addLine(Measure([1 for i in range(size - 1)] + [0]))  # measure the x qubits; all zeros iff f is constant
    return c
'''
Crea un oraculo U_f tal y como viene definido en el algoritmo de Deutsch-Josza para una funcion balanceada f: {0,1}^n ---> {0,1}, f(x) = msb(x) (bit mas significativo de x).
El argumento n no es el numero de bits de la entrada de f, sino dicho numero mas 1 (para el qubit de "salida").
'''
def Bal(n):
    """Oracle U_f for the balanced function f(x) = msb(x).

    Acts as a C-NOT with x1 (most significant input bit) as control and y as
    target, leaving the remaining qubits untouched, so y <- msb(x) XOR y.
    `n` is the number of input bits of f plus one (the "output" qubit y).
    """
    gate = I(n)
    # In the lower half of the basis (msb = 1), flip y by swapping each
    # consecutive even/odd row pair of the identity.
    for row in range(2 ** n // 2, (2 ** n) - 1, 2):
        tmp = np.copy(gate[row, :])
        gate[row, :] = gate[row + 1, :]
        gate[row + 1, :] = tmp
    return gate
'''
U_f generada con n = 3:
1 0 0 0 0 0 0 0
0 1 0 0 0 0 0 0
0 0 1 0 0 0 0 0
0 0 0 1 0 0 0 0
0 0 0 0 0 1 0 0
0 0 0 0 1 0 0 0
0 0 0 0 0 0 0 1
0 0 0 0 0 0 1 0
Las entradas son, empezando por el qubit mas significativo: x1, x2 e y.
Al aplicar el oraculo lo que hara es intercambiar la probabilidad asociada a |100> con la de |101> y la de |110> con |111>.
De forma mas general, la funcion Bal se observa que devuelve siempre una puerta que al ser aplicada a un conjunto x1, ..., xn, y
de qubits aplicara una C-NOT sobre x1 (control) e y (objetivo), dejando el resto de qubits intactos.
De esta forma el oraculo pondra en el qubit y el valor de x1 XOR y. Como para la mitad de las posibles entradas x1 valdra 0
y para la otra mitad 1, la funcion f es balanceada ya que devuelve 0 para la mitad de las posibles entradas y 1 para la otra mitad.
El oraculo U_f a su vez se comporta como se indica en el algoritmo, teniendo que y <- f(x) XOR y.
'''
def Teleportation(qbit, **kwargs):
    """Teleport *qbit* and return the resulting registry.

    A real quantum computer could not inspect a qubit without collapsing
    it, but the simulator can, which is used below to verify the result.
    Pass seed=<seed> to make the run reproducible.
    """
    rnd.seed(kwargs.get('seed', None))  # fix the RNG seed before the experiment starts
    r = QRegistry([qbit, 0, 0])  # registry with the qubit to send (Q), Bob's (B) and Alice's (A); B and A start as |0>
    print ("Original registry:\n", r.state)  # show the initial state of the registry
    r.applyGate(I(1), H(1), I(1))  # Hadamard on B: equal superposition of |0> and |1>
    r.applyGate(I(1), CNOT())  # C-NOT with B as control and A as target
    print ("With Bell+ state:\n", r.state)  # B and A are now entangled in a Bell+ state; show the registry
    # Operations on the qubit Q we are about to send; in this example a
    # Hadamard followed by a pi/2 phase shift.
    r.applyGate(H(1), I(2))
    r.applyGate(PhaseShift(np.pi/2), I(2))
    # With Q prepared, set up the actual transmission.
    r.applyGate(CNOT(), I(1))  # C-NOT with Q as control and B as target
    r.applyGate(H(1), I(2))  # Hadamard on Q
    print ("\nBefore measurement:\n", r.state)  # registry right before the measurement
    m = r.measure([1,1,0])  # measure qubits Q and B
    print ("q0 = ", m[0], "\nq1 = ", m[1])  # show the measurement outcome
    q0 = 0  # q0 and q1 mirror the two classical bits so we can build the
    q1 = 0  # registry expected on success and compare against it below
    if (m[1] == 1):
        q1 = 1
        r.applyGate(I(2), PauliX())  # B measured 1: rotate A around the X axis (Pauli-X / NOT)
    if (m[0] == 1):
        q0 = 1
        r.applyGate(I(2), PauliZ())  # Q measured 1: rotate A around the Z axis (Pauli-Z)
    er = QRegistry([q0, q1, qbit])  # reference registry used for the success check
    # Apply the same single-qubit operations to obtain what should arrive
    # (Hadamard and PhaseShift in this example).
    er.applyGate(I(2), H(1))
    er.applyGate(I(2), PhaseShift(np.pi/2))
    print ("\nExpected result:\n", er.state, "\nResult:\n", r.state)  # show expected vs obtained registries
    print ("Assert: " + str(r.state == er.state))
    return r  # registry produced by the algorithm
def TeleportationCircuit(gate, save=True):
    """Build and return a reusable teleportation circuit.

    gate: the operation applied to the first qubit (the one being
    teleported) after the Bell pair has been prepared on the two ancilla
    qubits.  Repairs the corrupted "qc.add | Line" line from the dump.
    """
    qc = QCircuit("Teleportation", save=save, ancilla=[0, 0])
    qc.addLine(I(1), H(1), I(1))
    qc.addLine(I(1), CNOT())
    # Operate on the qubit Q to be sent: apply the gate given as argument.
    qc.addLine(gate, I(2))
    # Once done manipulating the qubit, prepare the transmission.
    qc.addLine(CNOT(), I(1))  # C-NOT with Q as control and B as target.
    qc.addLine(H(1), I(2))  # Hadamard on Q.
    # Classically controlled corrections applied after the measurement.
    c1 = Condition([None, 1, None], PauliX())
    c2 = Condition([1, None, None], PauliZ())
    m = Measure([1, 1, 0], conds=[c1, c2], remove=True)
    qc.addLine(m)
    return qc  # Return the circuit.
def ExampleTC(value, gate, **kwargs): # El valor debe ser 0 o 1, valor inicial del QuBit a teleportar. Gate es la puerta que se va a aplicar sobre el QuBit a teleportar.
rnd.seed(kwargs.get('seed', None)) # Para asegurar la repetibilidad fijamos la semilla antes del experimento.
# Diseñamos la puerta que se va a aplicar sobre el QuBit
#g = QGate()
#g.addLine(H(1))
#g.addLine(PhaseShift(np.pi/2))
c = TeleportationCircuit(gate, save=kwargs. |
scVENUS/PeekabooAV | peekaboo/server.py | Python | gpl-3.0 | 13,675 | 0.000219 | ###############################################################################
# #
# Peekaboo Extended Email Attachment Behavior Observation Owl #
# #
# server.py #
###############################################################################
# #
# Copyright (C) 2016-2020 science + computing ag #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at #
# your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
""" This module implements the Peekaboo server, i.e. the frontend to the
client. """
import asyncio
import email.utils
import logging
import urllib.parse
import sanic
import sanic.headers
import sanic.response
from peekaboo.db import PeekabooDatabaseError
logger = logging.getLogger(__name__)
class PeekabooServer:
""" A class wrapping the server components of Peekaboo. """
    def __init__(self, host, port, job_queue, sample_factory,
                 request_queue_size, db_con):
        """ Initialise a new server and start it. All error conditions are
        returned as exceptions.

        @param host: The local address to bind the socket to.
        @type host: String
        @param port: The local port to listen on for client connections.
        @type port: int
        @param job_queue: A reference to the job queue for submission of
                          samples.
        @type job_queue: JobQueue
        @param sample_factory: A reference to a sample factory for creating new
                               samples.
        @type sample_factory: SampleFactory
        @param request_queue_size: Number of requests that may be pending on
                                   the socket.
        @type request_queue_size: int
        @param db_con: Database connection used by the request handlers
                       (presumably a PeekabooDatabase, given the
                       PeekabooDatabaseError import -- confirm).
        """
        logger.debug('Starting up server.')
        self.app = sanic.Sanic("PeekabooAV", configure_logging=False)
        # report errors to clients as JSON rather than HTML
        self.app.config.FALLBACK_ERROR_FORMAT = "json"
        # silence sanic to a reasonable amount
        logging.getLogger('sanic.root').setLevel(logging.WARNING)
        logging.getLogger('sanic.access').setLevel(logging.WARNING)
        self.loop = asyncio.get_event_loop()
        # create the server coroutine without starting to serve yet; it is
        # awaited/started separately
        self.server_coroutine = self.app.create_server(
            host=host, port=port, return_asyncio_server=True,
            backlog=request_queue_size,
            asyncio_server_kwargs=dict(start_serving=False))
        self.server = None
        self.job_queue = job_queue
        self.sample_factory = sample_factory
        self.db_con = db_con
        # remember for diagnostics
        self.host = host
        self.port = port
        self.app.add_route(self.hello, '/')
        self.app.add_route(self.ping, '/ping')
        self.app.add_route(self.scan, "/v1/scan", methods=['POST'])
        self.app.add_route(
            self.report, '/v1/report/<job_id:int>', methods=['GET'])
async def hello(self, _):
""" hello endpoint as fallback and catch all
@returns: hello world json response
"""
return sanic.response.json({'hello': 'PeekabooAV'})
async def ping(self, _):
""" ping endpoint for diagnostics
@returns: pong json response
"""
return sanic.response.json({'answer': 'pong'})
async def scan(self, request):
""" scan endpoint for job submission
@param request: sanic request object
@type request: sanic.Request
@returns: json response containing ID of newly created job
"""
# this is sanic's multipart/form-data parser in a version that knows
# that our file field contains binary data. This allows transferring
# files without a filename. The generic parser would treat those as
# text fields and try to decode them using the form charset or UTF-8 as
# a fallback and cause errors such as: UnicodeDecodeError: 'utf-8'
# codec can't decode byte 0xc0 in position 1: invalid start byte
content_type, parameters = sanic.headers.parse_content_header(
reques | t.content_type)
# application/x-www-form-urlencoded is inefficient at transporting
# binary data. Also it needs a separate field to transfer the filename.
# Make clear here that we do not support that format (yet).
if content_type != 'multipart/form-data':
logger.error('Invalid content type %s', content_type)
return sanic.response.json(
{'message': 'Invalid content type, use multipart/form-data'},
| 400)
boundary = parameters["boundary"].encode("utf-8")
form_parts = request.body.split(boundary)
# split above leaves preamble in form_parts[0] and epilogue in
# form_parts[2]
num_fields = len(form_parts) - 2
if num_fields <= 0:
logger.error('Invalid MIME structure in request, no fields '
'or preamble or epilogue missing')
return sanic.response.json(
{'message': 'Invalid MIME structure in request'}, 400)
if num_fields != 1:
logger.error('Invalid number of fields in form: %d', num_fields)
return sanic.response.json(
{'message': 'Invalid number of fields in form, we accept '
'only one field "file"'}, 400)
form_part = form_parts[1]
file_name = None
content_type = None
field_name = None
line_index = 2
line_end_index = 0
while line_end_index != -1:
line_end_index = form_part.find(b'\r\n', line_index)
# this constitutes a hard requirement for the multipart headers
# (and filenames therein) to be UTF-8-encoded. There are some
# obscure provisions for transferring an encoding in RFC7578
# section 5.1.2 for HTML forms which don't apply here so its
# fallback to UTF-8 applies. This is no problem for our field name
# (ASCII) and file names in RFC2231 encoding. For HTML5-style
# percent-encoded filenames it means that whatever isn't
# percent-encoded needs to be UTF-8 encoded. There are no rules in
# HTML5 currently to percent-encode any UTF-8 byte sequences.
form_line = form_part[line_index:line_end_index].decode('utf-8')
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(':')
idx = colon_index + 2
form_header_field = form_line[0:colon_index].lower()
# parse_content_header() reverts some of the percent encoding as
# per HTML5 WHATWG spec. As it is a "living standard" (i.e. moving
# target), it has changed over the years. There used to be
# backslash doubling and expli |
jiasir/playback | playback/cli/cli.py | Python | mit | 2,192 | 0.006387 | import sys
import argparse
import pkg_resources
from playback import __version__
def get_parser():
    """Build the top-level command line parser, registering one
    'provision' sub-command for every entry point in the 'provision'
    entry-point group."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='OpenStack provisioning and orchestration library with command-line tools'
    )
    parser.add_argument(
        '-v', '--version',
        action='version', version=__version__,
    )
    parser.add_argument(
        '--user',
        help='the username to connect to the remote host', action='store', default='ubuntu', dest='user'
    )
    parser.add_argument(
        '--hosts',
        help='the remote host to connect to ', action='store', default=None, dest='hosts'
    )
    parser.add_argument(
        '-i', '--key-filename',
        help='referencing file paths to SSH key files to try when connecting', action='store', dest='key_filename', default=None
    )
    parser.add_argument(
        '--password',
        help='the password used by the SSH layer when connecting to remote hosts', action='store', dest='password', default=None
    )
    subparser = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    provision_parser = subparser.add_parser(
        'provision',
        help='provision and manage OpenStack'
    )
    provision_subparser = provision_parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    # Load every registered provision command (also repairs the corrupted
    # "iter_entry | _points" / "entry_ | points" tokens from the dump).
    entry_points = [
        (ep.name, ep.load())
        for ep in pkg_resources.iter_entry_points('provision')
    ]
    # Sort by the optional 'priority' attribute of each loaded function.
    # Rewritten without Python-2-only tuple parameter unpacking
    # (lambda (name, fn): ...) so the module also parses on Python 3.
    entry_points.sort(
        key=lambda entry: getattr(entry[1], 'priority', 100),
    )
    for (name, fn) in entry_points:
        p = provision_subparser.add_parser(
            name,
            description=fn.__doc__,
            help=fn.__doc__,
        )
        fn(p)
    return parser
def _main():
    """Parse the command line and dispatch to the selected sub-command.

    Prints help and exits when no command was given.
    """
    parser = get_parser()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    args = parser.parse_args()
    return args.func(args)


def main():
    """Console entry point.

    Only Ctrl-C is silenced; any other exception now propagates.  The
    previous bare ``except: pass`` swallowed every failure (including
    genuine bugs), making the tool exit silently with success.
    """
    try:
        _main()
    except KeyboardInterrupt:
        pass


if __name__ == '__main__':
    main()
|
novafloss/django-formidable | formidable/utils.py | Python | mit | 342 | 0 | from importlib import import_module
def import_object(object_path):
    """Import a class, function or other attribute by its dotted path.

    Repairs the corrupted "retur | n" token from the dump.

    :param object_path: full dotted path to the object, e.g. "json.dumps"
    :return: the imported object
    :raises ValueError: if *object_path* contains no dot
    :raises ImportError: if the module part cannot be imported
    :raises AttributeError: if the module lacks the named attribute
    """
    module_path, class_name = object_path.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, class_name)
|
ericdill/bokeh | bokeh/server/storage/backbone_storage.py | Python | bsd-3-clause | 3,761 | 0.003988 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from bokeh import protocol
from bokeh.document import Document
from bokeh.util.serialization import dump
from bokeh.util.string import decode_utf8, encode_utf8
from .abstract_backbone_storage import AbstractBackboneStorage
class BackboneStorage(AbstractBackboneStorage):
    """Document-level push/pull of serialized Backbone models, built on the
    key/value primitives (mget/mset/smembers/sadd/srem/delete) supplied by
    concrete subclasses.
    """
    def pull(self, docid, typename=None, objid=None):
        """you need to call this with either typename AND objid
        or leave out both. leaving them out means retrieve all
        otherwise, retrieves a specific object
        """
        doc_keys = self.smembers(_dockey(docid))
        attrs = self.mget(doc_keys)
        data = []
        for k, attr in zip(doc_keys, attrs):
            typename, _, modelid = _parse_modelkey(k)
            attr = protocol.deserialize_json(decode_utf8(attr))
            data.append({'type': typename, 'attributes': attr})
        return data

    def get_document(self, docid):
        """Reconstruct and return the Document stored under *docid*."""
        json_objs = self.pull(docid)
        doc = Document(json_objs)
        doc.docid = docid
        return doc

    def store_objects(self, docid, *objs, **kwargs):
        """Serialize *objs* and everything they reference under *docid*;
        returns the stored models with their _dirty flags cleared."""
        dirty_only = kwargs.pop('dirty_only', True)
        models = set()
        for obj in objs:
            # references() yields a collection of models; merge it into the
            # set.  The previous .add() call tried to insert the collection
            # itself, which raises TypeError for an unhashable argument and
            # could never produce individual models.
            models.update(obj.references())
        if dirty_only:
            # NOTE(review): despite its name, dirty_only only converts the
            # set to a list here and does not filter on each model's _dirty
            # flag (compare store_document below) -- confirm intent.
            models = list(models)
        json_objs = dump(models, docid)
        self.push(docid, *json_objs)
        for mod in models:
            mod._dirty = False
        return models

    def store_document(self, doc, temporary_docid=None, dirty_only=True):
        """store all dirty models
        """
        # This is not so nice - we need to use doc with the original docid
        # when we create json objs, however use the temporary_docid
        # when we actually store the values
        # TODO: refactor this API in the future for better separation
        if temporary_docid is not None:
            storage_id = temporary_docid
        else:
            storage_id = doc.docid
        logger.debug("storing objects to %s", storage_id)
        models = doc._models.values()
        if dirty_only:
            models = [x for x in models if hasattr(x, '_dirty') and x._dirty]
        json_objs = doc.dump(*models)
        self.push(storage_id, *json_objs)
        for mod in models:
            mod._dirty = False
        return models

    def push(self, docid, *jsonobjs):
        """Write the given JSON objects and register their keys under the
        document's key set."""
        keys = [_modelkey(attr['type'],
                          docid,
                          attr['attributes']['id']) for attr in jsonobjs]
        for attr in jsonobjs:
            attr['attributes']['doc'] = docid
        attrs = [protocol.serialize_json(attr['attributes']) for attr in jsonobjs]
        dkey = _dockey(docid)
        data = dict(zip(keys, attrs))
        self.mset(data)
        self.sadd(dkey, *keys)

    def del_obj(self, docid, m):
        """Remove model *m* from *docid*: drop its key from the document's
        key set and delete the serialized value."""
        mkey = _modelkey(m.__view_model__, docid, m._id)
        self.srem(_dockey(docid), mkey)
        self.delete(mkey)
def _dockey(docid):
    """Key of the set that lists all model keys belonging to *docid*."""
    return encode_utf8('doc:' + docid)

def _modelkey(typename, docid, modelid):
    """Key under which one serialized model of a document is stored."""
    return 'bbmodel:%s:%s:%s' % (typename,
                                 encode_utf8(docid),
                                 encode_utf8(modelid))

def _parse_modelkey(key):
    """Split a model key back into its (typename, docid, modelid) parts."""
    _prefix, typename, docid, modelid = decode_utf8(key).split(":")
    return typename, docid, modelid
nick-huang-cc/GraffitiSpaceTT | UnderstandStudyPython/TCP_stu2.py | Python | agpl-3.0 | 1,020 | 0.015588 | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
'''
TCP 客户端
'''
#导入socket库
import socket
#创建一个socket AF_INET是定IPv4协议、AF_INET6为指定IPv6协议, SOCK_STREAM指定使用面向流的TCP协议
s = socket.socket(socket.AF_INET, socket.SOCK_S | TREAM)
#建立连接 如下参数tuple包含地址可端口号
s.connect(('www.wufazhuce.com', 80))
#建立TCP连接后就可以向连接发送请求,
#发送数据
s.send(b'GET / HTTP/1.1\r\nHost:www.wufazhuce.com\r\nConnection: close\r\n\r\n')
#接收数据
buffer = []
while True:
#每次最多接收1k字节
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = b''.join | (buffer)
#关闭连接
s.close()
header, html = data.split(b'\r\n\r\n', 1)
print(header.decode('utf-8'))
#把接收的数据写入文件:
with open('com.html', 'wb') as f:
f.write(html)
|
dqnykamp/nykampweb | nykampweb/wsgi.py | Python | gpl-2.0 | 395 | 0 | """
WSGI config for nykampweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nykampweb.settings")
application = get_wsgi_applicat | ion()
|
kingfisher1337/tns | qpotts_groundstate_1d/plot.py | Python | gpl-3.0 | 12,349 | 0.008017 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from hashlib import md5
from fractions import Fraction
mpl.rcParams["text.usetex"] = True
mpl.rcParams["text.latex.preamble"] = "\usepackage{bm}"
def read_hash(path):
    """Return the md5 digest previously cached at *path*, or "" when no
    such file exists."""
    if not os.path.isfile(path):
        return ""
    with open(path, "rb") as f:
        return f.read()

def write_hash(path, h):
    """Cache digest *h* at *path*, overwriting any previous value.

    Uses a context manager so the file is closed even when the write
    raises; the previous open/write/close sequence leaked the handle on
    error.
    """
    with open(path, "wb") as f:
        f.write(h)
# For every recorded h/magnetisation sweep in output/, (re)draw its plot,
# but only when the data file's md5 digest differs from the digest cached
# next to the previously rendered plot (Python 2 code: print statement,
# xrange, list-returning filter/map).
for filename in sorted(os.listdir("output")):
    if filename.startswith("h_mz_"):
        h = []
        mz = []
        md = md5()
        with open("output/" + filename, "r") as f:
            # Recover the run parameters (chi, svalerr, q) from the
            # "key=value" chunks embedded in the file name.
            chi = filter(lambda s: s.find("chi=") != -1, filename[:-4].split("_"))[0].split("=")[-1]
            svalerr = float(filter(lambda s: s.find("svalerr=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
            q = int(filter(lambda s: s.find("q=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
            for line in f:
                fields = line.split(" ")
                h.append(-float(fields[0]))
                mz.append(map(float, fields[1:1+q]))
                # Digest every data line so unchanged files are detected.
                md.update(line)
        markers = ["x", "+", "o", "D", "v", "^", "<", ">"]
        colours = ["b", "g", "r", "c", "m", "y", "k", "b"]
        if md.hexdigest() != read_hash("plots/" + filename.split(".dat")[0] + ".md5"):
            print filename
            # One curve per projector |j><j|, j = 0..q-1.
            for j in xrange(q):
                s = "| {:d} \\rangle \\langle {:d} |".format(j,j)
                plt.plot(h, map(lambda x: x[j], mz), markers[j], mfc="none", mec=colours[j], label="$\\langle " + s + " \\rangle_{0,\\infty}$")
            #plt.plot(h, mx, marker="x", label="$\\langle \\sigma_x \\rangle_{0,\\infty}$")
            plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
            plt.legend(loc="best")
            plt.xlabel("$-h$")
            #plt.ylim(0,1)
            plt.grid(True)
            plt.savefig("plots/" + filename.split(".dat")[0] + ".png", dpi=300)
            plt.close()
            # Cache the digest so the plot is not redrawn next run.
            write_hash("plots/" + filename.split(".dat")[0] + ".md5", md.hexdigest())
# Tables of the lowest field values (exact fractions) used below to
# calibrate the CTMRG singular-value spectra; indexed by q.  FM appears to
# be the ordered (ferromagnetic, h < 1) branch and PM the disordered
# (paramagnetic, h > 1) branch; entries for q < 2 are placeholders (None).
# NOTE(review): presumably these are conformal weights of the critical
# q-state Potts chain -- confirm the source before extending the tables.
primFieldsFM = [
    None, None,
    [Fraction(1,16)],
    [Fraction(1,8),Fraction(13,8)]
]
primFieldsPM = [
    None, None,
    [Fraction(0), Fraction(1,2)],
    [Fraction(0),Fraction(2,3)]
]
def scale_svals_for_fields(f, xi):
    """Affinely map the -log of the singular values *xi* onto the ladder
    fixed by the lowest one or two entries of *f*: the smallest level is
    shifted to f0 and the first gap normalised to min(f1, f0 + 1) - f0.
    Requires at least two values in *xi*.
    """
    levels = np.sort(-np.log(xi))
    lowest = sorted(f)[:2]
    f0 = lowest[0]
    if len(lowest) > 1:
        f1 = min([lowest[1], f0 + 1])
    else:
        f1 = f0 + 1
    levels = levels * float(f1 - f0) / (levels[1] - levels[0])
    return levels - levels[0] + f0

def scale_svals_fm(q, xi):
    """Rescale *xi* using the ferromagnetic-branch field table for q."""
    return scale_svals_for_fields(primFieldsFM[q], xi)

def scale_svals_pm(q, xi):
    """Rescale *xi* using the paramagnetic-branch field table for q."""
    return scale_svals_for_fields(primFieldsPM[q], xi)
def get_yticks_for_fields(fields, ymin, ymax):
    """Return the distinct y positions f + j (integer offsets j) that lie
    inside [ymin, ymax] for every field f in *fields*.

    Bug fix: the upper bound previously read the stale module-level name
    ``h`` (left over from the plotting loop) instead of the field ``f``.
    """
    t = []
    for f in fields:
        for j in range(int(np.ceil(ymin - f)), int(np.floor(ymax - f)) + 1):
            if float(f + j) not in t:
                t.append(float(f + j))
    return t

def get_yticklabels_for_fields(fields, ymin, ymax):
    """LaTeX labels matching get_yticks_for_fields: "$\\frac{p}{q}$" for
    the field itself ("$0$" when f == 0) and "...+j" for shifted copies.

    Same ``ymax - h`` -> ``ymax - f`` bug fix as above.
    """
    t = []
    for f in fields:
        for j in range(int(np.ceil(ymin - f)), int(np.floor(ymax - f)) + 1):
            s = "0" if f == 0 else "\\frac{" + str(f.numerator) + "}{" + str(f.denominator) + "}"
            if j > 0:
                s += "+" + str(j)
            s = "$" + s + "$"
            if s not in t:
                t.append(s)
    return t

def get_fm_yticks(q, ymin, ymax):
    """Tick positions for the ferromagnetic branch of the q-state model."""
    return get_yticks_for_fields(primFieldsFM[q], ymin, ymax)

def get_pm_yticks(q, ymin, ymax):
    """Tick positions for the paramagnetic branch."""
    return get_yticks_for_fields(primFieldsPM[q], ymin, ymax)

def get_fm_yticklabels(q, ymin, ymax):
    """Tick labels for the ferromagnetic branch."""
    return get_yticklabels_for_fields(primFieldsFM[q], ymin, ymax)

def get_pm_yticklabels(q, ymin, ymax):
    """Tick labels for the paramagnetic branch."""
    return get_yticklabels_for_fields(primFieldsPM[q], ymin, ymax)
# Same change-detection scheme for the detailed CTMRG singular-value files:
# collect the xi values per field value h, then draw one plot for the
# ferromagnetic side (h < 1) and one for the paramagnetic side (h > 1).
for filename in sorted(os.listdir("output")):
    if filename.startswith("ctmrgsvals_detail_"):
        h_xi = dict()
        md = md5()
        with open("output/" + filename, "r") as f:
            # Recover the run parameters from the file name.
            chi = filter(lambda s: s.find("chi=") != -1, filename[:-4].split("_"))[0].split("=")[-1]
            q = int(filter(lambda s: s.find("q=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
            svalerr = float(filter(lambda s: s.find("svalerr=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
            for line in f:
                fields = line.split(" ")
                h = -float(fields[0])
                xi = float(fields[1])
                if not h_xi.has_key(h):
                    h_xi[h] = []
                # Only positive singular values enter the spectrum.
                if xi > 0:
                    h_xi[h].append(xi)
                md.update(line)
        #md.update("foo")
        if md.hexdigest() != read_hash("plots/" + filename.split(".dat")[0] + ".md5"):
            print filename
            # Ferromagnetic side (h < 1); rescaling needs >= 2 levels.
            for h in h_xi:
                xi = h_xi[h]
                if h < 1 and len(xi) > 1:
                    xi = scale_svals_fm(q, xi)
                    plt.plot([h]*len(xi), xi, "b+")
            plt.ylabel("$-a \\log(\\xi) + b$")
            ymin,ymax = plt.axes().get_ylim()
            plt.axes().set_yticks(get_fm_yticks(q, ymin, ymax))
            plt.axes().set_yticklabels(get_fm_yticklabels(q, ymin, ymax))
            plt.grid(True)
            plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
            plt.xlabel("$-h$")
            # Insert "fm" into the output file name.
            s = filename.split(".dat")[0]
            s = s.split("_detail_")
            s = s[0] + "_detail_fm_" + s[1]
            plt.savefig("plots/" + s + ".png", dpi=300)
            plt.close()
            # Paramagnetic side (h > 1).
            for h in h_xi:
                xi = h_xi[h]
                if h > 1 and len(xi) > 1:
                    xi = scale_svals_pm(q, xi)
                    plt.plot([h]*len(xi), xi, "b+")
            plt.ylabel("$-a \\log(\\xi) + b$")
            ymin,ymax = plt.axes().get_ylim()
            plt.axes().set_yticks(get_pm_yticks(q, ymin, ymax))
            plt.axes().set_yticklabels(get_pm_yticklabels(q, ymin, ymax))
            plt.grid(True)
            plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
            plt.xlabel("$-h$")
            s = filename.split(".dat")[0]
            s = s.split("_detail_")
            s = s[0] + "_detail_pm_" + s[1]
            plt.savefig("plots/" + s + ".png", dpi=300)
            plt.close()
            # Cache the digest so these plots are not redrawn next run.
            write_hash("plots/" + filename.split(".dat")[0] + ".md5", md.hexdigest())
"""
hDegeneracyLabel1 = 0.5
hDegeneracyLabel2 = 1.5
primaryField1PM = [ Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0) ]
primaryField2PM = [ Fraction(1), Fraction(1), Fraction(1,2), Fraction(2,3), Fraction(1), Fraction(1), Fraction(1), Fraction(1), Fraction(1) ]
primaryField1FM = [ Fraction(0), Fraction(0), Fraction(1,16), Fraction(1,8), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0) ]
primaryField2FM = [ Fraction(1), Fraction(1), Fraction(17,16), Fraction(13,8), Fraction(1), Fraction(1), Fraction(1), Fraction(1), Fraction(1) ]
yTicks1 = []
yTickLabels1 = []
yTicks2 = []
yTickLabels2 = []
for q in xrange(9):
f = [primaryField1FM[q], primaryField2FM[q]]
t = list()
l = list()
for k in xrange(len(f)):
for j in xrange(10):
x = f[k] + j
if not float(x) in t:
t.append(float(x))
s = "$0" if f[k].numerator == 0 else "$\\frac{" + str(f[k].numerator) + "}{" + str(f[k].denominator) + "}"
if j > 0:
s += "+" + str(j)
s += "$"
l.append(s)
yTicks1.append(t)
yTickLabels1.append(l)
f = [primaryField1PM[q], primaryField2PM[q]]
t = list()
l = list()
for k in xrange(len(f)):
for j in xrange(10):
x = f[k] + j
if not float(x) in t:
t.append(float(x)) |
timrchavez/capomastro | jenkins/tests/test_models.py | Python | mit | 1,239 | 0 | from django.test import TestCase
from httmock import HTTMock
from jenkinsapi.jenkins import Jenkins
from jenkins.models import Build, JobType
from .helpers import mock_url
from .factories import BuildFactory, JenkinsServerFactory
class JenkinsServerTest(TestCase):

    def test_get_client(self):
        """
        JenkinsServer.get_client should return a Jenkins client configured
        appropriately.
        """
        server = JenkinsServerFactory.create()
        api_mock = mock_url(r"\/api\/python$", "fixture1")
        with HTTMock(api_mock):
            client = server.get_client()
        self.assertIsInstance(client, Jenkins)
class BuildTest(TestCase):

    def test_ordering(self):
        """Builds should be ordered in reverse build order by default."""
        # (Repairs the corrupted "se | lf" and "values | _list" tokens.)
        builds = BuildFactory.create_batch(5)
        build_numbers = sorted([x.number for x in builds], reverse=True)
        self.assertEqual(
            build_numbers,
            list(Build.objects.all().values_list("number", flat=True)))
class JobTypeTest(TestCase):

    def test_instantiation(self):
        """We can create JobTypes."""
        # Creation succeeding without raising is the assertion here.
        JobType.objects.create(name="my-test", config_xml="testing xml")
|
konini-school/pibot26 | module3.py | Python | gpl-2.0 | 541 | 0.003697 | ##############################################
# File Name: module3.py
# Version: 1.0
# Team No.: 26
# Team Name:
# Date: 28 Oct 15
##############################################
import RPi.GPIO as GPIO
import time
imp | ort sys, tty, termios
print '\nHi, I am PiBot, your very own learning robot..\n'
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
ledOnTime = 5
# Turn on LED
print 'Turn the LED on'
GPIO.output(7, True)
time.sleep(ledOnTime | )
print 'Turn the LED off'
GPIO.output(7, False)
GPIO.cleanup()
print "\nEnd of program"
|
mhl/gib | gitsetup.py | Python | lgpl-2.1 | 10,749 | 0.004279 | from configparser import RawConfigParser
import os
import re
from subprocess import call, check_call, Popen, PIPE, STDOUT
import sys
from errors import Errors
from general import (
exists_and_is_directory, shellquote, print_stderr
)
from githelpers import has_objects_and_refs
class OptionFrom:
    '''enum-like values to indicate the source of different options, used in
    directory_to_backup_from, git_directory_from and branch_from'''
    # Precedence order: command line overrides the configuration file,
    # which overrides the built-in default.
    COMMAND_LINE = 1
    CONFIGURATION_FILE = 2
    DEFAULT_VALUE = 3
    # Human-readable names used when reporting where a setting came from.
    string_versions = { COMMAND_LINE : "command line",
                        CONFIGURATION_FILE : "configuration file",
                        DEFAULT_VALUE : "default value" }
class GibSetup:
    def __init__(self, command_line_options):
        """Resolve the three backup settings (directory to back up, git
        directory, branch) from, in order of precedence, the command line,
        the .gib.conf file inside the backup directory, and built-in
        defaults.  Records where each value came from and exits via
        sys.exit() with an Errors code on invalid configurations.
        """
        self.configuration_file = '.gib.conf'
        self.directory_to_backup = None
        self.directory_to_backup_from = None
        self.git_directory = None
        self.git_directory_from = None
        self.branch = None
        self.branch_from = None
        if command_line_options.directory:
            self.directory_to_backup = command_line_options.directory
            self.directory_to_backup_from = OptionFrom.COMMAND_LINE
        else:
            if 'HOME' not in os.environ:
                # Then we can't use HOME as default directory:
                print_stderr("The HOME environment variable was not set")
                sys.exit(Errors.STRANGE_ENVIRONMENT)
            self.directory_to_backup = os.environ['HOME']
            self.directory_to_backup_from = OptionFrom.DEFAULT_VALUE
        # We need to make sure that this is an absolute path before
        # changing directory:
        self.directory_to_backup = os.path.abspath(self.directory_to_backup)
        if not exists_and_is_directory(self.directory_to_backup):
            sys.exit(Errors.DIRECTORY_TO_BACKUP_MISSING)
        # Now we know the directory that we're backing up, try to load the
        # config file:
        configuration = RawConfigParser()
        configuration.read(os.path.join(self.directory_to_backup,
                                        self.configuration_file))
        # Now set the git directory:
        if command_line_options.git_directory:
            self.git_directory = command_line_options.git_directory
            self.git_directory_from = OptionFrom.COMMAND_LINE
        elif configuration.has_option('repository','git_directory'):
            self.git_directory = configuration.get(
                'repository','git_directory'
            )
            self.git_directory_from = OptionFrom.CONFIGURATION_FILE
        else:
            self.git_directory = os.path.join(self.directory_to_backup,'.git')
            self.git_directory_from = OptionFrom.DEFAULT_VALUE
        if not os.path.isabs(self.git_directory):
            print_stderr("The git directory must be an absolute path.")
            sys.exit(Errors.GIT_DIRECTORY_RELATIVE)
        # And finally the branch:
        if command_line_options.branch:
            self.branch = command_line_options.branch
            self.branch_from = OptionFrom.COMMAND_LINE
        elif configuration.has_option('repository','branch'):
            self.branch = configuration.get('repository','branch')
            self.branch_from = OptionFrom.CONFIGURATION_FILE
        else:
            self.branch = 'master'
            self.branch_from = OptionFrom.DEFAULT_VALUE
        # Check that the git_directory ends in '.git':
        if not re.search('\.git/*$',self.git_directory):
            message = "The git directory ({}) did not end in '.git'"
            print_stderr(message.format(self.git_directory))
            sys.exit(Errors.BAD_GIT_DIRECTORY)
        # Also check that it actually exists:
        if not os.path.exists(self.git_directory):
            message = "The git directory '{}' does not exist."
            print_stderr(message.format(self.git_directory))
            sys.exit(Errors.GIT_DIRECTORY_MISSING)
    def get_directory_to_backup(self):
        # Absolute path of the directory being backed up.
        return self.directory_to_backup
    def get_git_directory(self):
        # Absolute path of the git directory holding the backup.
        return self.git_directory
    def get_file_list_directory(self):
        # Directory (inside the git directory) where file lists are kept.
        return os.path.join(
            self.get_git_directory(),
            'file-lists'
        )
    def get_branch(self):
        # Name of the branch the backup is committed to.
        return self.branch
    def print_settings(self):
        # Report the resolved settings and where each one came from
        # (command line, configuration file or default).
        print_stderr('''Settings for backup:
  backing up the directory {} (set from the {})
  ... to the branch "{}" (set from the {})
  ... in the git repository {} (set from the {})'''.format(
            self.directory_to_backup,
            OptionFrom.string_versions[self.directory_to_backup_from],
            self.branch,
            OptionFrom.string_versions[self.branch_from],
            self.git_directory,
            OptionFrom.string_versions[self.git_directory_from]),
        )
def get_invocation(self):
'''Return an invocation that would run the script with options
that will set directory_to_backup, git_directory and branch as on
this invocation. After init has been called, we can just specify
the directory to backup, since the configuration file .gib.conf in
that directory will store the git_directory and the branch. If
the directory to backup is just the current user's home directory,
then that doesn't need to be specified either.'''
invocation = sys.argv[0]
if self.directory_to_backup != os.environ['HOME']:
invocation += " " + "--directory="
invocation += shellquote(self.directory_to_backup)
return invocation
def git(self,rest_of_command):
'''Create an list (suitable for passing to subprocess.call or
subprocess.check_call) which runs a git command with the correct
git directory and work tree'''
return [ "git",
"--git-dir="+self.git_directory,
"--work-tree="+self.directory_to_backup ] + rest_of_command
def git_for_shell(self):
'''Returns a string with shell-safe invocation of git which can be used
in calls that a | re subj | ect to shell interpretation.'''
command = "git --git-dir="+shellquote(self.git_directory)
command += " --work-tree="+shellquote(self.directory_to_backup)
return command
    def git_initialized(self):
        '''Returns True if it seems as if the git directory has already
        been initialized, and returns False otherwise'''
        return has_objects_and_refs(self.git_directory)
    def abort_if_not_initialized(self):
        '''Check that the git repository exists and exit otherwise'''
        if not self.git_initialized():
            message = "You don't seem to have initialized {} for backup."
            print_stderr(message.format(self.directory_to_backup))
            message = "Please use '{} init' to initialize it"
            print_stderr(message.format(self.get_invocation()))
            sys.exit(Errors.REPOSITORY_NOT_INITIALIZED)
def check_ref(self,ref):
'''Returns True if a ref can be resolved to a commit and False
otherwise.'''
return 0 == call(
self.git(["rev-parse","--verify",ref]),
stdout=open('/dev/null','w'),
stderr=STDOUT
)
    def check_tree(self,tree):
        '''Returns True if 'tree' can be understood as a tree, e.g. with
        "git ls-tree" or false otherwise'''
        with open('/dev/null','w') as null:
            return 0 == call(
                self.git(["ls-tree",tree]),
                stdout=null,
                stderr=STDOUT
            )
    def set_HEAD_to(self,ref):
        '''Update head to point to a particular branch, without touching
        the index or the working tree'''
        # symbolic-ref rewrites HEAD itself, so no checkout takes place.
        check_call(
            self.git(["symbolic-ref","HEAD","refs/heads/{}".format(ref)])
        )
def currently_on_correct_branch(self):
'''Return True if HEAD currently points to 'self.branch', and
return False otherwise.'''
p = Popen(self.git(["symbolic-ref","HEAD"]),stdout=PIPE)
c = p.communicate()
if 0 != p.returncode:
|
thebjorn/dkcoverage | dkcoverage/rtestcover.py | Python | gpl-2.0 | 3,325 | 0.000902 | # -*- coding: utf-8 -*-
"""Called from datakortet\dkcoverage.bat to record regression test
coverage data in dashboard.
"""
import re
import os
# import sys
# import time
import glob
# from datakortet.dkdash.status import send_status
# from datakortet.utils import root
from coverage import coverage, misc
from coverage.files import find_python_files
from coverage.parser import CodeParser
from coverage.config import CoverageConfig
from . import dkenv
def linecount(fname, excludes):
"""Return the number of lines in ``fname``, counting the same way that
coverage does.
"""
cp = CodeParser(filename=fname,
exclude=re.compile(misc.join_regex(excludes)))
lines, excluded = cp.parse_source()
return len(lines), len(excluded)
def skiplist():
cov = coverage(config_file=os.path.join(dkenv.DKROOT, '.coveragerc'))
cwd = os.getcwd()
skippatterns = [os.path.normpath(p.replace(cwd, d | kenv.DKROOT)) for p in cov.omit]
_skipli | st = []
for pat in skippatterns:
_skiplist += glob.glob(pat)
return set(_skiplist)
def abspath(fname):
# cwd = os.getcwd()
res = os.path.normcase(
os.path.normpath(
os.path.abspath(fname))) #.replace(cwd, root()))))
return res
def valid_file(fname, _skiplist=None):
_skiplist = _skiplist or skiplist()
if fname.endswith('.py'):
absfname = abspath(fname)
if absfname not in _skiplist:
fpath, name = os.path.split(fname)
if name != '__init__.py' or os.stat(absfname).st_size > 0:
return absfname
return False
def python_files(folder):
_skiplist = skiplist()
for fname in find_python_files(folder):
f = valid_file(fname, _skiplist)
if f:
yield f
def pylinecount(rt=None, verbose=False):
"""Count Python lines the same way that coverage does.
"""
res = 0
cov = coverage(config_file=os.path.join(dkenv.DKROOT, '.coveragerc'))
rt = rt or dkenv.DKROOT
_skiplist = skiplist()
exclude_lines = cov.get_exclude_list()
for fname in python_files(rt):
if os.path.normpath(fname) not in _skiplist:
lcount, excount = linecount(fname, exclude_lines)
if verbose:
print '%5d %5d %s' % (lcount, excount, fname)
res += lcount
else:
if verbose:
print '-----', fname
return res
# def report_test_coverage(reportline, dashboard=True):
# start = time.time()
# parts = reportline.split()
#
# stmts = int(parts[1])
# skipped = int(parts[2])
# covered = stmts - skipped
# print >> sys.stderr, "COVERED:", covered
#
# linecount = pylinecount()
# print >> sys.stderr, "TOTAL: ", linecount
#
# coverage = 100.0 * covered / linecount
# severity = 'green'
# if coverage < 85:
# severity = 'yellow'
# if coverage < 60:
# severity = 'red'
#
# sys.stdout.write("Coverage: " + str(coverage) + '\n')
#
# if dashboard:
# send_status(tag='code.testcov',
# value=coverage,
# duration=time.time() - start,
# server='appsrv')
# if __name__ == "__main__":
# intxt = sys.stdin.read()
# report_test_coverage(intxt)
# sys.exit(0)
|
kaltwang/latenttrees | latenttrees/test_lt_helper.py | Python | bsd-3-clause | 880 | 0.003409 | import unittest
import numpy as np
import latenttrees.lt_helper | as lth
from scipy.stats import norm
class TestLtHelper(unittest.TestCase):
pass
def test_norm_logpdf_generator(x, mu, std):
def test(self):
scipy_d = norm(mu, std) # scipy normal distribution
logpdf_scipy = scipy_d.logpdf(x)
logpdf = lth.norm_logpdf(x, mu, std)
# self.assertEqual(True | , False)
np.testing.assert_allclose(logpdf, logpdf_scipy)
return test
if __name__ == '__main__':
for i in range(10):
test_name = 'test_norm_logpdf_{}'.format(i)
d1 = 100
d2 = 1
mu = np.random.randn(d1, d2)
std = np.random.rand(d1, d2)
x = (np.random.rand(d1, d2) * 20) - 10
test = test_norm_logpdf_generator(x, mu, std)
setattr(TestLtHelper, test_name, test)
unittest.main()
|
vanita5/TwittnukerGCMServer | app.py | Python | gpl-3.0 | 6,900 | 0.006667 | import os, binascii
from dateutil import parser as dateparser
from bottle import run, get, post, delete, install, HTTPError, request
from bottle_sqlite import SQLitePlugin
from dbsetup import init_db
from google_auth import gauth
from app_conf import DBNAME, DEBUG
from app_gcm import send_notification
init_db(DBNAME)
install(SQLitePlugin(dbfile=DBNAME))
install(gauth)
def prnt(s):
if DEBUG:
print s
def settings_to_dict(row):
resp = dict(accountid = row['accountid'],
# Convert integer to boolean
nmentions = (row['nmentions'] == 1),
ndms = (row['ndms'] == 1),
nfollower = (row['nfollower'] == 1))
prnt('Response: ' + str(resp))
return resp
def account_from_db(db, accountid, userid):
prnt('\nCreate response...')
args = [accountid, userid]
stmt = "SELECT * FROM ACCOUNTS WHERE accountid IS ? AND userid IS ?"
response = db.execute(stmt, args).fetchone()
return settings_to_dict(response)
@get('/')
@get('/settings')
def get_settings(db, userid):
'''Return a list of the settings per twitter account to show in the app'''
prnt('GET: /settings')
args = [userid]
settings = []
stmt = 'SELECT * FROM accounts WHERE userid IS ?'
for row in db.execute(stmt, args):
settings.append(settings_to_dict(row))
prnt('GOT SETTINGS: ' + settings)
return dict(settings = settings)
@post('/settings')
def set_settings(db, userid):
'''Set settings in the db and apply them (TODO)'''
prnt('POST: /settings')
prnt('Data: ' + str(request.json))
if 'application/json' not in request.content_type:
return HTTPError(415, "Only json is accepted")
# Check required fields
if ('accountid' not in request.json or request.json['accountid'] is None
or len(request.json['accountid']) < 1):
return HTTPError(400, "Must specify an account.")
if ('nmentions' not in request.json or request.json['nmentions'] is None
or len(request.json['nmentions']) < 1):
request.json['nmentions'] = 0
if ('ndms' not in request.json or request.json['ndms'] is None
or len(request.json['ndms']) < 1):
request.json['ndms'] = 0
if ('nfollower' not in request.json or request.json['nfollower'] is None
or len(request.json['nfollower']) < 1):
request.json['nfollower'] = 0
args = [request.json['nmentions'],
request.json['ndms'],
request.json['nfollower'],
userid,
request.json['accountid']]
stmt = 'UPDATE accounts SET nmentions = ?, ndms = ?, nfollower = ? WHERE userid IS ? AND accountid IS ?'
db.execute(stmt, args)
if db.total_changes > 0:
return {}
else:
return HTTPError(500, "Updating settings failed!")
@post('/registergcm')
def register_gcm(db, userid):
'''Adds a registration id for a user to the database.
Returns nothing.'''
prnt('POST: /registergcm')
prnt('Data: ' + str(request.json))
if 'application/json' not in request.content_type:
return HTTPError(415, "Request needs to be JSON")
# Check required fields
if ('regid' not in request.json or request.json['regid'] is None
or len(request.json['regid']) < 1):
return HTTPError(400, "No registration id was given")
db.execute('INSERT INTO gcm (userid, regid) VALUES(?, ?)',
[userid, request.json['regid']])
if db.total_changes > 0:
return {}
else:
return HTTPError(500, "Registration failed!")
@post('/unregistergcm')
def unregister_gcm(db, userid):
'''Completely removes the user from the notification service'''
prnt('POST: /unregistergcm')
prnt('Data: ' + str(request.json))
if 'application/json' not in request.content_type:
return HTTPError(415, "Request needs to be JSON")
# Check required fields
if ('regid' not in request.json or request.json['regid'] is None
or len(request.json['regid']) < 1):
return HTTPError(400, "No registration id was given")
db.execute('DELETE FROM gcm WHERE userid IS ? AND regid IS ?', [userid, request.json['regid']])
# TODO Remove user from Twitter stream
if db.total_changes > 0:
return {}
else:
return HTTPError(500, "User does not exist or has already been removed.")
@post('/removeaccount')
def remove_account(db, userid):
'''Remove a Twitter account from monitoring'''
prnt('POST: /removeaccount')
prnt('Data: ' + str(request.json))
if 'application/json' not in request.content_type:
return HTTPError(415, "Request needs to be JSON")
# Check required fields
if ('accountid' not in request.json or request.json['accountid'] is None
or len(request.json['accountid']) < 1):
return HTTPError(400, "No account id was given!")
args = [userid,
request.json['accountid']]
stmt = 'DELETE FROM accounts WHERE userid IS ? AND accountid IS ?'
db.execute(stmt, args)
if db.total_changes > 0:
# TODO remove account from user stream
return {}
else:
return HTTPError(500, "Account could not be romved or has already been removed.")
@post('/addaccount')
def add_account(db, userid):
'''Adds a Twitter account id to monitor'''
prnt('POST: /addaccount')
prnt('Data: ' + str(request.json))
if 'application/json' not in request.content_type:
return HTTPError(415, "Request needs to be JSON")
# Check required fields
if ('accountid' not in request.json or request.json['accountid'] is None
or len(request.json['accountid']) < 1):
return HTTPError(400, "No account id was given!")
if ('nmentions' not in request.json or request.json['nmentions'] is None
or len(request.json['nmentions']) < 1):
request.json['nmentions'] = 0
if ('ndms' not in request.json or request.json['ndms'] is None
or len(request.json['ndms']) < 1):
request.json['ndms'] = 0
if ('nfollower' not in request.json or request.json['nfollower'] is None
or len(request.json['nfollower']) < 1):
request.json['nfollower'] = 0
args = [request.json['nmentions'],
request.json['ndms'],
request.json['nfollower'],
userid,
request.json['accountid']]
stmt = 'INSERT INTO accounts (nmentions, ndms, nfollower, userid, accountid) VALUES(?, ?, ?, ?, ?)'
db.execute(stmt, args)
if db.total_changes > 0:
# TODO add account to user stream
# TODO Return new Account
return account_from_db(db, request.json['accountid'], userid)
| else:
prnt('NO CHANGES ON DB')
return HTTPError(500, "Account id could not be added to the database.")
if __name__ == '__main__':
run(host = '0.0.0.0', port = 5050, re | loader = True, debug = True)
|
Instanssi/Instanssi.org | Instanssi/users/views.py | Python | mit | 2,915 | 0.002058 | # -*- coding: utf-8 -*-
from Instanssi.common.auth import user_access_required
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib import auth
from django.urls import reverse
from Instanssi.users.forms import OpenIDLoginForm, DjangoLoginForm, ProfileForm
from Instanssi.common.mis | c import get_url_local_path
AUTH_METHODS = [
# Short name, social-auth, friendly name
('facebook', 'facebook', 'Facebook'),
('google', 'google-oauth2', 'Google'),
('twitter', 'twitter', 'Twitter'),
('github', 'github', 'Github'),
('battlenet', 'battlenet-oauth2', 'Battle.net'),
('steam', 'steam', 'Steam'),
]
def login(request):
if request.user | .is_authenticated:
return HttpResponseRedirect(reverse('users:profile'))
# Get referer for redirect
# Make sure that the referrer is a local path.
if 'next' in request.GET:
next_page = get_url_local_path(request.GET['next'])
else:
next_page = get_url_local_path(request.META.get('HTTP_REFERER', reverse('users:profile')))
# Test django login form
if request.method == "POST":
djangoform = DjangoLoginForm(request.POST)
if djangoform.is_valid():
djangoform.login(request)
return HttpResponseRedirect(djangoform.cleaned_data['next'])
else:
djangoform = DjangoLoginForm(next=next_page)
# Openid login form
# The form will be handled elsewhere; this is only for rendering the form.
openidform = OpenIDLoginForm(next=next_page)
# Render response
return render(request, "users/login.html", {
'djangoform': djangoform,
'openidform': openidform,
'next': next_page,
'AUTH_METHODS': AUTH_METHODS
})
def loggedout(request):
return render(request, "users/loggedout.html")
@user_access_required
def profile(request):
from social_django.models import DjangoStorage
if request.method == "POST":
profileform = ProfileForm(request.POST, instance=request.user, user=request.user)
if profileform.is_valid():
profileform.save()
return HttpResponseRedirect(reverse('users:profile'))
else:
profileform = ProfileForm(instance=request.user, user=request.user)
# Get all active providers for this user
active_providers = []
for social_auth in DjangoStorage.user.get_social_auth_for_user(request.user):
active_providers.append(social_auth.provider)
# Providers list
methods = []
for method in AUTH_METHODS:
methods.append(method + (method[1] in active_providers, ))
return render(request, "users/profile.html", {
'profileform': profileform,
'active_providers': active_providers,
'AUTH_METHODS': methods
})
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('users:loggedout'))
|
tartavull/google-cloud-python | datastore/tests/unit/test_query.py | Python | apache-2.0 | 26,883 | 0 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestQuery(unittest.TestCase):
_PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
from google.cloud.datastore.query import Query
return Query
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_client(self):
return _Client(self._PROJECT)
def test_ctor_defaults(self):
client = self._make_client()
query = self._make_one(client)
self.assertIs(query._client, client)
self.assertEqual(query.project, client.project)
self.assertIsNone(query.kind)
self.assertEqual(query.namespace, client.namespace)
self.assertIsNone(query.ancestor)
self.assertEqual(query.filters, [])
self.assertEqual(query.projection, [])
self.assertEqual(query.order, [])
self.assertEqual(query.distinct_on, [])
def test_ctor_explicit(self):
from google.cloud.datastore.key import Key
_PROJECT = 'OTHER_PROJECT'
_KIND = 'KIND'
_NAMESPACE = 'OTHER_NAMESPACE'
client = self._make_client()
ancestor = Key('ANCESTOR', 123, project=_PROJECT)
FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)]
PROJECTION = ['foo', 'bar', 'baz']
ORDER = ['foo', 'bar']
DISTINCT_ON = ['foo']
query = self._make_one(
client,
kind=_KIND,
project=_PROJECT,
namespace=_NAMESPACE,
ancestor=ancestor,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
distinct_on=DISTINCT_ON,
)
self.assertIs(query._client, client)
self.assertEqual(query.project, _PROJECT)
self.assertEqual(query.kind, _KIND)
self.assertEqual(query.namespace, _NAMESPACE)
self.assertEqual(query.ancestor.path, ancestor.path)
self.assertEqual(query.filters, FILTERS)
self.assertEqual(query.projection, PROJECTION)
self.assertEqual(query.order, ORDER)
self.assertEqual(query.distinct_on, DISTINCT_ON)
def test_ctor_bad_projection(self):
BAD_PROJECTION = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
projection=BAD_PROJECTION)
def test_ctor_bad_order(self):
BAD_ORDER = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
order=BAD_ORDER)
def test_ctor_bad_distinct_on(self):
BAD_DISTINCT_ON = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
distinct_on=BAD_DISTINCT_ON)
def test_ctor_bad_filters(self):
FILTERS_CANT_UNPACK = [('one', 'two')]
self.assertRaises(ValueError, self._make_one, self._make_client(),
filters=FILTERS_CANT_UNPACK)
def test_namespace_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.namespace = val
self.assertRaises(ValueError, _assign, object())
def test_namespace_setter(self):
_NAMESPACE = 'OTHER_NAMESPACE'
query = self._make_one(self._make_client())
query.namespace = _NAMESPACE
self.assertEqual(query.namespace, _NAMESPACE)
def test_kind_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.kind = val
self.assertRaises(TypeError, _assign, object())
def test_kind_setter_wo_existing(self):
_KIND = 'KIND'
query = self._make_one(self._make_client())
query.kind = _KIND
self.assertEqual(query.kind, _KIND)
def test_kind_setter_w_existing(self):
_KIND_BEFORE = 'KIND_BEFORE'
_KIND_AFTER = 'KIND_AFTER'
query = self._make_one(self._make_client(), kind=_KIND_BEFORE)
self.assertEqual | (query.kind, _KIND_BEFORE)
query.kind = _KIND_AFTER
self.asser | tEqual(query.project, self._PROJECT)
self.assertEqual(query.kind, _KIND_AFTER)
def test_ancestor_setter_w_non_key(self):
query = self._make_one(self._make_client())
def _assign(val):
query.ancestor = val
self.assertRaises(TypeError, _assign, object())
self.assertRaises(TypeError, _assign, ['KIND', 'NAME'])
def test_ancestor_setter_w_key(self):
from google.cloud.datastore.key import Key
_NAME = u'NAME'
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('name', '=', _NAME)
query.ancestor = key
self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
from google.cloud.datastore.key import Key
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(client=self._make_client(), ancestor=key)
del query.ancestor
self.assertIsNone(query.ancestor)
def test_add_filter_setter_w_unknown_operator(self):
query = self._make_one(self._make_client())
self.assertRaises(ValueError, query.add_filter,
'firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
query = self._make_one(self._make_client())
query.add_filter('firstname', '=', u'John')
self.assertEqual(query.filters, [('firstname', '=', u'John')])
def test_add_filter_w_all_operators(self):
query = self._make_one(self._make_client())
query.add_filter('leq_prop', '<=', u'val1')
query.add_filter('geq_prop', '>=', u'val2')
query.add_filter('lt_prop', '<', u'val3')
query.add_filter('gt_prop', '>', u'val4')
query.add_filter('eq_prop', '=', u'val5')
self.assertEqual(len(query.filters), 5)
self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1'))
self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2'))
self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3'))
self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4'))
self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5'))
def test_add_filter_w_known_operator_and_entity(self):
from google.cloud.datastore.entity import Entity
query = self._make_one(self._make_client())
other = Entity()
other['firstname'] = u'John'
other['lastname'] = u'Smith'
query.add_filter('other', '=', other)
self.assertEqual(query.filters, [('other', '=', other)])
def test_add_filter_w_whitespace_property_name(self):
query = self._make_one(self._make_client())
PROPERTY_NAME = ' property with lots of space '
query.add_filter(PROPERTY_NAME, '=', u'John')
self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')])
def test_add_filter___key__valid_key(self):
from google.cloud.datastore.key import Key
query = self._make_one(self._make_client())
key = Key('Foo', project=self._PROJECT)
query.add_filter('__key__', '=', key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__not_equal_operator(self):
from google.cloud.datastore.key import Key
key = Key('Foo', project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('__key__', '<', key)
self.assertEqual(query.filters, [('__key__', '<', key)])
def test_filter___key__inv |
rsalmaso/django-babeljs | babeljs/execjs/__init__.py | Python | mit | 8,122 | 0.001847 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (c) 2012 Omoto Kenji
# Copyright (c) 2011 Sam Stephenson
# Copyright (c) 2011 Josh Peek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Run JavaScript code from Python.
PyExecJS is a porting of ExecJS from Ruby.
PyExecJS automatically picks the best runtime available to evaluate your JavaScript program,
then returns the result to you as a Python object.
A short example:
>>> import reactjs.execjs
>>> execjs.eval("'red yellow blue'.split(' ')")
['red', 'yellow', 'blue']
>>> ctx = execjs.compile("""
... function add(x, y) {
... return x + y;
... }
... """)
>>> ctx.call("add", 1, 2)
3
'''
# changes from PyExecJS:
# * in javascript bootstrap do
# result = program().code;
# instead of
# result = program();
# so result is the transpiled code
# (untested on all platform but nodejs)
# * the temp js file is prefixed as babeljs and not execjs
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import os.path
try:
from collections import OrderedDic | t
except ImportError:
| from ordereddict import OrderedDict
from .exceptions import Error, RuntimeError, ProgramError, RuntimeUnavailable
from .runtime import Runtime, PyV8Runtime
__all__ = [
"get", "register", "runtimes", "get_from_environment", "exec_", "eval", "compile",
"Runtime", "Context",
"Error", "RuntimeError", "ProgramError", "RuntimeUnavailable",
]
def register(name, runtime):
'''Register a JavaScript runtime.'''
_runtimes[name] = runtime
def get(name=None):
"""
Return a appropriate JavaScript runtime.
If name is specified, return the runtime.
"""
if name is None:
return _auto_detect()
try:
runtime = runtimes()[name]
except KeyError:
raise RuntimeUnavailable("{name} runtime is not defined".format(name=name))
else:
if not runtime.is_available():
raise RuntimeUnavailable(
"{name} runtime is not available on this system".format(name=runtime.name))
return runtime
def runtimes():
"""return a dictionary of all supported JavaScript runtimes."""
return dict(_runtimes)
def available_runtimes():
"""return a dictionary of all supported JavaScript runtimes which is usable"""
return dict((name, runtime) for name, runtime in _runtimes.items() if runtime.is_available())
def _auto_detect():
runtime = get_from_environment()
if runtime is not None:
return runtime
for runtime in _runtimes.values():
if runtime.is_available():
return runtime
raise RuntimeUnavailable("Could not find a JavaScript runtime.")
def get_from_environment():
'''
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable.
If EXECJS_RUNTIME environment variable is empty or invalid, return None.
'''
try:
name = os.environ["EXECJS_RUNTIME"]
except KeyError:
return None
if not name:
#name is None or empty str
return None
return get(name)
def eval(source):
return get().eval(source)
def exec_(source):
return get().exec_(source)
def compile(source):
return get().compile(source)
_runtimes = OrderedDict()
_runtimes['PyV8'] = PyV8Runtime()
for command in ["nodejs", "node"]:
_runtimes["Node"] = runtime = Runtime(
name="Node.js (V8)",
command=[command],
encoding='UTF-8',
runner_source=r"""(function(program, execJS) { execJS(program) })(function() { #{source}
}, function(program) {
var output;
var print = function(string) {
process.stdout.write('' + string + '\n');
};
try {
result = program().code;
print('');
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print(err);
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});""",
)
if runtime.is_available():
break
_runtimes['JavaScriptCore'] = Runtime(
name="JavaScriptCore",
command=["/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Resources/jsc"],
runner_source=r"""(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
var output;
try {
result = program().code;
print("");
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
"""
)
_runtimes['SpiderMonkey'] = _runtimes['Spidermonkey'] = Runtime(
name="SpiderMonkey",
command=["js"],
runner_source=r"""(function(program, execJS) { execJS(program) })(function() { #{source}
}, function(program) {
#{json2_source}
var output;
try {
result = program().code;
print("");
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
""")
_runtimes['JScript'] = Runtime(
name="JScript",
command=["cscript", "//E:jscript", "//Nologo"],
encoding="ascii",
runner_source=r"""(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
#{json2_source}
var output, print = function(string) {
string = string.replace(/[^\x00-\x7f]/g, function(ch){
return '\\u' + ('0000' + ch.charCodeAt(0).toString(16)).slice(-4);
});
WScript.Echo(string);
};
try {
result = program().code;
print("")
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', err.name + ': ' + err.message]));
}
});
"""
)
for _name, _command in [
['PhantomJS', 'phantomjs'],
['SlimerJS', 'slimerjs'],
]:
_runtimes[_name] = Runtime(
name=_name,
command=[_command],
runner_source=r"""
(function(program, execJS) { execJS(program) })(function() {
return eval(#{encoded_source});
}, function(program) {
var output;
var print = function(string) {
console.log('' + string);
};
try {
result = program().code;
print('')
if (typeof result == 'undefined' && result !== null) {
print('["ok"]');
} else {
try {
print(JSON.stringify(['ok', result]));
} catch (err) {
print('["err"]');
}
}
} catch (err) {
print(JSON.stringify(['err', '' + err]));
}
});
phantom.exit();
""")
|
adazey/Muzez | libs/nltk/classify/__init__.py | Python | gpl-3.0 | 4,636 | 0.000647 | # Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# Fo | r license information, see LICENSE.TXT
"""
Classes and interfaces for labeling tokens with category labels (or
"class labels"). Typically, labels are represented with strings
(such as ``'health'`` or ``'sports'``). Classifiers can be used to
perform a wide range of classification tasks. For example,
classifiers can be used...
- to classify documents by topic
- to classify ambiguous words by whic | h word sense is intended
- to classify acoustic signals by which phoneme they represent
- to classify sentences by their author
Features
========
In order to decide which category label is appropriate for a given
token, classifiers examine one or more 'features' of the token. These
"features" are typically chosen by hand, and indicate which aspects
of the token are relevant to the classification decision. For
example, a document classifier might use a separate feature for each
word, recording how often that word occurred in the document.
Featuresets
===========
The features describing a token are encoded using a "featureset",
which is a dictionary that maps from "feature names" to "feature
values". Feature names are unique strings that indicate what aspect
of the token is encoded by the feature. Examples include
``'prevword'``, for a feature whose value is the previous word; and
``'contains-word(library)'`` for a feature that is true when a document
contains the word ``'library'``. Feature values are typically
booleans, numbers, or strings, depending on which feature they
describe.
Featuresets are typically constructed using a "feature detector"
(also known as a "feature extractor"). A feature detector is a
function that takes a token (and sometimes information about its
context) as its input, and returns a featureset describing that token.
For example, the following feature detector converts a document
(stored as a list of words) to a featureset describing the set of
words included in the document:
>>> # Define a feature detector function.
>>> def document_features(document):
... return dict([('contains-word(%s)' % w, True) for w in document])
Feature detectors are typically applied to each token before it is fed
to the classifier:
>>> # Classify each Gutenberg document.
>>> from nltk.corpus import gutenberg
>>> for fileid in gutenberg.fileids(): # doctest: +SKIP
... doc = gutenberg.words(fileid) # doctest: +SKIP
... print fileid, classifier.classify(document_features(doc)) # doctest: +SKIP
The parameters that a feature detector expects will vary, depending on
the task and the needs of the feature detector. For example, a
feature detector for word sense disambiguation (WSD) might take as its
input a sentence, and the index of a word that should be classified,
and return a featureset for that word. The following feature detector
for WSD includes features describing the left and right contexts of
the target word:
>>> def wsd_features(sentence, index):
... featureset = {}
... for i in range(max(0, index-3), index):
... featureset['left-context(%s)' % sentence[i]] = True
... for i in range(index, max(index+3, len(sentence))):
... featureset['right-context(%s)' % sentence[i]] = True
... return featureset
Training Classifiers
====================
Most classifiers are built by training them on a list of hand-labeled
examples, known as the "training set". Training sets are represented
as lists of ``(featuredict, label)`` tuples.
"""
from nltk.classify.api import ClassifierI, MultiClassifierI
from nltk.classify.megam import config_megam, call_megam
from nltk.classify.weka import WekaClassifier, config_weka
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
from nltk.classify.decisiontree import DecisionTreeClassifier
from nltk.classify.rte_classify import rte_classifier, rte_features, RTEFeatureExtractor
from nltk.classify.util import accuracy, apply_features, log_likelihood
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.classify.maxent import (MaxentClassifier, BinaryMaxentFeatureEncoding,
TypedMaxentFeatureEncoding,
ConditionalExponentialClassifier)
from nltk.classify.senna import Senna
from nltk.classify.textcat import TextCat
|
ankanaan/chimera | src/chimera/util/output.py | Python | gpl-2.0 | 5,104 | 0.000392 | # Copyright 1998-2004 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: output.py,v 1.1 2006/03/06 18:13:31 henrique Exp $
import os
import sys
import re
# Module-level feature flags; toggled by nocolor()/notitles() below.
havecolor = 1
dotitles = 1
# Shared state for the update_*_spinner() progress helpers.
spinpos = 0
spinner = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# ANSI/ECMA-48 CSI introducer; SGR sequences are esc_seq + ";"-joined params + "m".
esc_seq = "\x1b["
# SGR attribute codes keyed by human-readable name (consumed by color()).
g_attr = {}
g_attr["normal"] = 0
g_attr["bold"] = 1
g_attr["faint"] = 2
g_attr["standout"] = 3
g_attr["underline"] = 4
g_attr["blink"] = 5
g_attr["overline"] = 6  # Why is overline actually useful?
g_attr["reverse"] = 7
g_attr["invisible"] = 8
g_attr["no-attr"] = 22
g_attr["no-standout"] = 23
g_attr["no-underline"] = 24
g_attr["no-blink"] = 25
g_attr["no-overline"] = 26
g_attr["no-reverse"] = 27
# 28 isn't defined?
# 29 isn't defined?
# Foreground colors (30-37).
g_attr["black"] = 30
g_attr["red"] = 31
g_attr["green"] = 32
g_attr["yellow"] = 33
g_attr["blue"] = 34
g_attr["magenta"] = 35
g_attr["cyan"] = 36
g_attr["white"] = 37
# 38 isn't defined?
g_attr["default"] = 39
# Background colors (40-47, 49).
g_attr["bg_black"] = 40
g_attr["bg_red"] = 41
g_attr["bg_green"] = 42
g_attr["bg_yellow"] = 43
g_attr["bg_blue"] = 44
g_attr["bg_magenta"] = 45
g_attr["bg_cyan"] = 46
g_attr["bg_white"] = 47
g_attr["bg_default"] = 49
# make_seq("blue", "black", "normal")
def color(fg, bg="default", attr=["normal"]):
mystr = esc_seq[:] + "%02d" % g_attr[fg]
for x in [bg] + attr:
mystr += ";%02d" % g_attr[x]
return mystr + "m"
# Pre-built SGR sequences keyed by friendly color/attribute name.
# nocolor() blanks every value so downstream wrappers become no-ops.
codes = {}
codes["reset"] = esc_seq + "39;49;00m"
codes["bold"] = esc_seq + "01m"
codes["faint"] = esc_seq + "02m"
codes["standout"] = esc_seq + "03m"
codes["underline"] = esc_seq + "04m"
codes["blink"] = esc_seq + "05m"
codes["overline"] = esc_seq + "06m"  # Who made this up? Seriously.
codes["teal"] = esc_seq + "36m"
codes["turquoise"] = esc_seq + "36;01m"
codes["fuchsia"] = esc_seq + "35;01m"
codes["purple"] = esc_seq + "35m"
codes["blue"] = esc_seq + "34;01m"
codes["darkblue"] = esc_seq + "34m"
codes["green"] = esc_seq + "32;01m"
codes["darkgreen"] = esc_seq + "32m"
codes["yellow"] = esc_seq + "33;01m"
codes["brown"] = esc_seq + "33m"
codes["red"] = esc_seq + "31;01m"
codes["darkred"] = esc_seq + "31m"
def nc_len(mystr):
    """Length of *mystr* with ANSI SGR escape sequences stripped.

    NOTE: ``esc_seq`` already ends with "[", so the concatenated pattern is
    ``\\x1b[^m]+m`` -- ESC, then any run of non-"m" characters, then the
    terminating "m".  It works, but only by virtue of that concatenation.
    """
    tmp = re.sub(esc_seq + "^m]+m", "", mystr)
    return len(tmp)
def xtermTitle(mystr):
    """Set the terminal window title to *mystr* (best effort).

    Only emits the OSC title sequence when color/titles are enabled, TERM is
    set, stderr is a TTY, and TERM starts with a known xterm-compatible name.
    """
    if havecolor and dotitles and "TERM" in os.environ and sys.stderr.isatty():
        myt = os.environ["TERM"]
        legal_terms = [
            "xterm", "Eterm", "aterm", "rxvt", "screen", "kterm", "rxvt-unicode"]
        for term in legal_terms:
            if myt.startswith(term):
                # OSC 2 ; <title> BEL -- sets the window title.
                sys.stderr.write("\x1b]2;" + str(mystr) + "\x07")
                sys.stderr.flush()
                break
def xtermTitleReset():
    """Reset the window title to the terminal's own name (best effort).

    Delegates to xtermTitle(), which re-checks the TTY/TERM preconditions.
    The original read os.environ["TERM"] into an unused local and then looked
    it up a second time; the redundant lookup is removed.
    """
    if havecolor and dotitles and "TERM" in os.environ:
        xtermTitle(os.environ["TERM"])
def notitles():
    "turn off title setting"
    # Bug fix: without `global`, the assignment only created a local variable
    # and the module-level flag consulted by xtermTitle() never changed.
    global dotitles
    dotitles = 0
def nocolor():
    "turn off colorization"
    # Bug fix: without `global`, the assignment only created a local variable
    # and the module-level `havecolor` flag never changed (the codes-blanking
    # loop below still worked because it mutates the dict in place).
    global havecolor
    havecolor = 0
    for x in codes.keys():
        codes[x] = ""
def resetColor():
    """Return the SGR sequence that resets colors and attributes."""
    return codes["reset"]
def ctext(color, text):
    """Wrap *text* in the named color code followed by a reset.

    Bug fix: the original indexed ``codes[ctext]`` -- the function object
    itself -- instead of the ``color`` argument, so every call raised
    KeyError.
    """
    return codes[color] + text + codes["reset"]
# Convenience wrappers: each returns *text* wrapped in the named SGR code
# plus a trailing reset.  They become identity-like no-ops after nocolor().
def bold(text):
    return codes["bold"] + text + codes["reset"]
def faint(text):
    return codes["faint"] + text + codes["reset"]
def white(text):
    return bold(text)
def teal(text):
    return codes["teal"] + text + codes["reset"]
def turquoise(text):
    return codes["turquoise"] + text + codes["reset"]
def darkteal(text):
    return turquoise(text)
def fuscia(text):  # Don't use this one. It's spelled wrong!
    return codes["fuchsia"] + text + codes["reset"]
def fuchsia(text):
    return codes["fuchsia"] + text + codes["reset"]
def purple(text):
    return codes["purple"] + text + codes["reset"]
def blue(text):
    return codes["blue"] + text + codes["reset"]
def darkblue(text):
    return codes["darkblue"] + text + codes["reset"]
def green(text):
    return codes["green"] + text + codes["reset"]
def darkgreen(text):
    return codes["darkgreen"] + text + codes["reset"]
def yellow(text):
    return codes["yellow"] + text + codes["reset"]
def brown(text):
    return codes["brown"] + text + codes["reset"]
def darkyellow(text):
    return brown(text)
def red(text):
    return codes["red"] + text + codes["reset"]
def darkred(text):
    return codes["darkred"] + text + codes["reset"]
def update_basic_spinner():
    """Emit one dot every 100 calls (an extra space every 500) as a
    minimal progress indicator on stdout."""
    global spinner, spinpos
    spinpos = (spinpos + 1) % 500
    if (spinpos % 100) == 0:
        if spinpos == 0:
            sys.stdout.write(". ")
        else:
            sys.stdout.write(".")
    sys.stdout.flush()
def update_scroll_spinner():
    """Animate a character that scrolls right then sweeps back left.

    The first half of the cycle writes forward through `spinner`; the
    second half backspaces and writes the mirrored position.
    """
    global spinner, spinpos
    if(spinpos >= len(spinner)):
        # Reverse sweep: erase and redraw from the far end.
        sys.stdout.write(
            darkgreen(" \b\b\b" + spinner[len(spinner) - 1 - (spinpos % len(spinner))]))
    else:
        sys.stdout.write(green("\b " + spinner[spinpos]))
    sys.stdout.flush()
    spinpos = (spinpos + 1) % (2 * len(spinner))
def update_spinner():
    """Overwrite the previous character with the next spinner frame."""
    global spinner, spinpos
    spinpos = (spinpos + 1) % len(spinner)
    sys.stdout.write("\b\b " + spinner[spinpos])
    sys.stdout.flush()
|
Crazepony/crazepony-gitbook | wiki/changelink.py | Python | apache-2.0 | 4,252 | 0.010335 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: deleteline.py
import os
import sys
reload(sys)
#sys.setdefaultencoding('utf8')
def ChangeLineInFile(infile, isOverwrite):
    """Demote markdown headings in a .md file by one level ("# " -> "## ",
    "## " -> "### "), writing the result to <name>-new.md, optionally
    replacing the original when isOverwrite is "Y".

    Python 2 code (uses `print` statements elsewhere in this module).
    """
    isOverwrite = isOverwrite.upper()
    _dir = os.path.dirname(infile)
    oldbasename = os.path.basename(infile)
    newbasename = oldbasename + '-new'
    extname = os.path.splitext(infile)[1]
    # Only markdown files are processed.
    if extname != ".md":
        return
    outfile = _dir+'/' + newbasename + extname
    infp = open(infile, "rb")
    outfp = open(outfile, "wb")
    lines = infp.readlines()
    title = None
    for line in lines:
        #print type(line)
        if (line.find("# ") > -1):
            line2 = line.replace("# ", "## ")
            outfp.writelines(line2)
        elif (line.find("## ") > -1):
            line2 = line.replace("## ", "### ")
            outfp.writelines(line2)
        # NOTE(review): the two statements below are corrupted -- the string
        # literals were truncated when this source was extracted (likely a
        # markdown image/link pattern).  Recover the originals before use.
        elif (line.find(" > -1):
            line2 = line.replace("
            outfp.writelines(line2)
        else:
            #print line
            outfp.writelines(line)
    infp.close()
    outfp.close()
    if isOverwrite == 'Y':
        #print 'remove',infile
        os.remove(infile)
        os.rename(outfile, infile)
        outfile = infile
    #print 'read %s'%infile, 'and save as %s'%outfile
    print 'read %s and save as %s'%(infile, outfile)
def DelLineInFile(infile, isOverwrite):
    """Strip Jekyll front matter ("---", "layout:", "title:") from a .md
    file and substitute "{{ page.title }}" with the captured title, writing
    to <name>-new.md and optionally overwriting the original ("Y").

    Python 2 code.
    """
    isOverwrite = isOverwrite.upper()
    _dir = os.path.dirname(infile)
    oldbasename = os.path.basename(infile)
    newbasename = oldbasename + '-new'
    extname = os.path.splitext(infile)[1]
    # Only markdown files are processed.
    if extname != ".md":
        return
    outfile = _dir+'/' + newbasename + extname
    infp = open(infile, "rb")
    outfp = open(outfile, "wb")
    lines = infp.readlines()
    title = None
    for line in lines:
        #print type(line)
        if (line.find("---") > -1):
            pass
        elif (line.find("layout:") > -1):
            pass
        elif line.find("title:") > -1:
            # Remember the title so it can replace the template placeholder.
            title = line.replace("title:","")
            print "title"+title
        elif line.find("{{ page.title }}") > -1:
            if title != None:
                line2 = line.replace("{{ page.title }}", title)
                outfp.writelines(line2)
        else:
            #print line
            outfp.writelines(line)
    infp.close()
    outfp.close()
    if isOverwrite == 'Y':
        #print 'remove',infile
        os.remove(infile)
        os.rename(outfile, infile)
        outfile = infile
    #print 'read %s'%infile, 'and save as %s'%outfile
    print 'read %s and save as %s'%(infile, outfile)
|
def ChangeLineInFolders():
    """Interactively prompt (in Chinese) for a directory and apply
    ChangeLineInFile() to every file under it, recursively.

    Python 2 code (raw_input, u-literal prompts encoded to UTF-8).
    """
    string = u'请输入目标文件夹路径====>'
    inpath = raw_input(string.encode('utf8'))
    string = u'您输入是:' + inpath
    print string
    string = u'是否覆盖源文件(Y/N)'
    isOverwrite = raw_input(string.encode('utf8'))
    isOverwrite = isOverwrite.upper()
    string = u'您的选择是:' + isOverwrite
    print string
    for (path,dirs,files) in os.walk(inpath):
        for f in files:
            infile = os.path.join(path, f)
            #print infile
            ChangeLineInFile(infile,isOverwrite)
# Interactive entry point: option 1 processes a whole directory tree,
# option 2 runs DelLineInFile on a single markdown file.
if __name__ == "__main__":
    string = u'1 修改指定目录下所有文.md件(包括子目录)'
    print string
    string = u'2 修改指定md文件 '
    print string
    string = u'请输入数字编号====>'
    index = int(raw_input(string.encode('utf8')))
    if index == 1:
        ChangeLineInFolders()
    elif index ==2:
        string = u'请输入目标文件路径====>'
        infile = raw_input(string.encode('utf8'))
        string = u'您输入是:' + infile
        print string
        string = u'是否覆盖源文件(Y/N)'
        isOverwrite = raw_input(string.encode('utf8'))
        string = u'您的选择是:' + isOverwrite.upper()
        print string
        DelLineInFile(infile, isOverwrite)
    else:
        string = u'编号输入错误,程序退出'
        print string
        sys.exit()
    raw_input("press Enter to exit")
    sys.exit()
|
piyueh/SEM-Toolbox | utils/errors/__init__.py | Python | mit | 330 | 0 | #! /usr/bin/env | python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Pi-Yueh Chuang <pychuang@gwu.edu>
#
# Distributed under terms of the MIT license.
"""__init__.py"""
from utils.errors.Error import Error
from utils.errors.InfLoopError import InfLoopError
__author__ = "Pi-Yueh Chuang"
__version__ | = "alpha"
|
davidvon/pipa-pay-server | admin/utils/__init__.py | Python | apache-2.0 | 28 | 0.035714 | __ | author__ = 'f | engguanhua'
|
galek/anki-3d-engine | docs/drafts/octree.py | Python | bsd-3-clause | 246 | 0.03252 | from math import *
octree_node_size = 112
def recurse(depth):
    """Total node count of a full octree: 1 root plus 8**d nodes at each
    deeper level d in 1..depth.

    Note: `pow` here is math.pow (star-imported above), so depths >= 1
    yield a float, matching the original recursive implementation.
    """
    total = 1
    for level in range(1, depth + 1):
        total += pow(8, level)
    return total
def octree_size(depth):
    """Approximate memory footprint of a full octree of the given depth."""
    return octree_node_size * recurse(depth)
print("Size %d" % (octree_size(3))) | |
codypiersall/mlab | tests/test_mlab_on_unix.py | Python | mit | 753 | 0.003984 | import sys
sys.path = ['../src/'] + sys.path
import unittest
from mlab.mlabwrap import Matlab | ReleaseNotFound
class Test | MlabUnix(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_version_discovery(self):
import mlab
instances = mlab.releases.MatlabVersions(globals())
assert len(instances.pick_latest_release()) > 0
with self.assertRaises(MatlabReleaseNotFound):
mlab_inst = instances.get_mlab_instance('R2010c')
def test_latest_release(self):
from mlab.releases import latest_release
from matlab import matlabroot
self.assertTrue(len(matlabroot())>0)
matlabroot()
if __name__ == '__main__':
unittest.main()
|
openelisglobal/openelisglobal-sandbox | liquibase/OE2.7/CILNSPMassive/scripts/dictionary.py | Python | mpl-2.0 | 1,212 | 0.005776 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_comma_split_names(name):
    """Split *name* on commas (or, failing that, semicolons) and strip
    surrounding whitespace from every piece.  A name with neither
    separator yields a single-element list."""
    if ',' in name:
        parts = name.split(',')
    elif ';' in name:
        parts = name.split(';')
    else:
        parts = [name]
    return [part.strip() for part in parts]
# Script body (Python 2): read existing dictionary entries, then emit an
# INSERT statement for every new name found in selectList.txt.
old = []
old_file = open("currentDictNames.txt")
new_file = open("selectList.txt")
result = open("dictionaryResult.sql",'w')
# Seed `old` with the entries that already exist.
for line in old_file:
    old.append(line.strip())
old_file.close()
for line in new_file:
    if len(line) > 1:
        values = get_comma_split_names(line)
        for value in values:
            # Only generate SQL for names not already present.
            if value.strip() not in old:
                old.append(value.strip())
                result.write("INSERT INTO clinlims.dictionary ( id, is_active, dict_entry, lastupdated, dictionary_category_id ) \n\t")
                result.write("VALUES ( nextval( 'dictionary_seq' ) , 'Y' , '" + value.strip() + "' , now(), ( select id from clinlims.dictionary_category where description = 'Haiti Lab' ));\n")
result.close()
print "Done check dictionaryResult.sql for values"
|
manu0466/BookingBot | src/bot/handler/decorator/__init__.py | Python | gpl-2.0 | 67 | 0 | from . | FilterableHandlerDecorator import FilterableHandlerDecorator
| |
hugoallan9/programacionMatematica | skeleton/App/hilo.py | Python | gpl-3.0 | 352 | 0.014286 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 10 | 06:32:29 2016
@author: hugo
"""
import threading
def worker(count):
    """Thread target: print a numbered message *count* times (Python 2)."""
    for x in range(count):
        print "Programación matemática %s \n " % x
    return
# Launch a single worker thread; the main thread's print below may
# interleave with the worker's output (no join() is performed).
threads = list()
t = threading.Thread(target=worker, args=(10,))
threads.append(t)
t.start()
print 'Hola mundo'
ray-project/ray | rllib/examples/models/rnn_model.py | Python | apache-2.0 | 5,133 | 0.000974 | import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.models.torch.recurrent_net import RecurrentNetwork as TorchRNN
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class RNNModel(RecurrentNetwork):
    """Example of using the Keras functional API to define a RNN model."""
    def __init__(
        self,
        obs_space,
        action_space,
        num_outputs,
        model_config,
        name,
        hiddens_size=256,
        cell_size=64,
    ):
        # hiddens_size: width of the dense layer in front of the LSTM;
        # cell_size: LSTM hidden/cell state size.
        super(RNNModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name
        )
        self.cell_size = cell_size
        # Define input layers
        input_layer = tf.keras.layers.Input(
            shape=(None, obs_space.shape[0]), name="inputs"
        )
        state_in_h = tf.keras.layers.Input(shape=(cell_size,), name="h")
        state_in_c = tf.keras.layers.Input(shape=(cell_size,), name="c")
        seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
        # Preprocess observation with a hidden layer and send to LSTM cell
        dense1 = tf.keras.layers.Dense(
            hiddens_size, activation=tf.nn.relu, name="dense1"
        )(input_layer)
        # Mask padded timesteps using the true sequence lengths.
        lstm_out, state_h, state_c = tf.keras.layers.LSTM(
            cell_size, return_sequences=True, return_state=True, name="lstm"
        )(
            inputs=dense1,
            mask=tf.sequence_mask(seq_in),
            initial_state=[state_in_h, state_in_c],
        )
        # Postprocess LSTM output with another hidden layer and compute values
        logits = tf.keras.layers.Dense(
            self.num_outputs, activation=tf.keras.activations.linear, name="logits"
        )(lstm_out)
        values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)
        # Create the RNN model
        self.rnn_model = tf.keras.Model(
            inputs=[input_layer, seq_in, state_in_h, state_in_c],
            outputs=[logits, values, state_h, state_c],
        )
        self.rnn_model.summary()
    @override(RecurrentNetwork)
    def forward_rnn(self, inputs, state, seq_lens):
        # Runs the Keras model; caches the value branch for value_function().
        model_out, self._value_out, h, c = self.rnn_model([inputs, seq_lens] + state)
        return model_out, [h, c]
    @override(ModelV2)
    def get_initial_state(self):
        # Zero-initialized [h, c] LSTM state.
        return [
            np.zeros(self.cell_size, np.float32),
            np.zeros(self.cell_size, np.float32),
        ]
    @override(ModelV2)
    def value_function(self):
        # Flatten the cached (B, T, 1) value output to shape (B*T,).
        return tf.reshape(self._value_out, [-1])
class TorchRNNModel(TorchRNN, nn.Module):
    """PyTorch counterpart of RNNModel: fc -> LSTM -> (action, value) heads."""
    def __init__(
        self,
        obs_space,
        action_space,
        num_outputs,
        model_config,
        name,
        fc_size=64,
        lstm_state_size=256,
    ):
        nn.Module.__init__(self)
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        self.obs_size = get_preprocessor(obs_space)(obs_space).size
        self.fc_size = fc_size
        self.lstm_state_size = lstm_state_size
        # Build the Module from fc + LSTM + 2xfc (action + value outs).
        self.fc1 = nn.Linear(self.obs_size, self.fc_size)
        self.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)
        self.action_branch = nn.Linear(self.lstm_state_size, num_outputs)
        self.value_branch = nn.Linear(self.lstm_state_size, 1)
        # Holds the current "base" output (before logits layer).
        self._features = None
    @override(ModelV2)
    def get_initial_state(self):
        # TODO: (sven): Get rid of `get_initial_state` once Trajectory
        # View API is supported across all of RLlib.
        # Place hidden states on same device as model.
        h = [
            self.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),
            self.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),
        ]
        return h
    @override(ModelV2)
    def value_function(self):
        assert self._features is not None, "must call forward() first"
        # Flatten the value head output to shape (B*T,).
        return torch.reshape(self.value_branch(self._features), [-1])
    @override(TorchRNN)
    def forward_rnn(self, inputs, state, seq_lens):
        """Feeds `inputs` (B x T x ..) through the Gru Unit.
        Returns the resulting outputs as a sequence (B x T x ...).
        Values are stored in self._cur_value in simple (B) shape (where B
        contains both the B and T dims!).
        Returns:
            NN Outputs (B x T x ...) as sequence.
            The state batches as a List of two items (c- and h-states).
        """
        x = nn.functional.relu(self.fc1(inputs))
        # nn.LSTM expects (num_layers, B, H) state tensors, hence the
        # unsqueeze on the way in and squeeze on the way out.
        self._features, [h, c] = self.lstm(
            x, [torch.unsqueeze(state[0], 0), torch.unsqueeze(state[1], 0)]
        )
        action_out = self.action_branch(self._features)
        return action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]
|
bfalacerda/strands_executive | task_executor/tests/mdp_exec_test.py | Python | mit | 2,319 | 0.004743 | #!/usr/bin/env python
import rospy
from strands_executive_msgs import task_utils
from strands_executive_msgs.msg import Task
from strands_executive_msgs.srv import AddTasks, SetExecutionStatus
from strands_navigation_msgs.msg import *
import sys
def get_services():
    """Wait for the task-executor ROS services and return proxies for
    (add_tasks, set_execution_status).  Blocks until both are available."""
    # get services necessary to do the job
    add_tasks_srv_name = '/task_executor/add_tasks'
    set_exe_stat_srv_name = '/task_executor/set_execution_status'
    rospy.loginfo("Waiting for task_executor service...")
    rospy.wait_for_service(add_tasks_srv_name)
    rospy.wait_for_service(set_exe_stat_srv_name)
    rospy.loginfo("Done")
    add_tasks_srv = rospy.ServiceProxy(add_tasks_srv_name, AddTasks)
    set_execution_status = rospy.ServiceProxy(set_exe_stat_srv_name, SetExecutionStatus)
    return add_tasks_srv, set_execution_status
def create_wait_task(node, secs=rospy.Duration(1), start_after=None, window_size=rospy.Duration(3600)):
    """Build a 'wait_action' Task at *node* lasting *secs*, executable in
    the window [start_after, start_after + window_size].

    NOTE: the rospy.Duration defaults are evaluated once at import time;
    that is fine here because Durations are effectively constants.
    """
    if start_after is None:
        start_after = rospy.get_rostime()
    wait_task = Task(action='wait_action',start_node_id=node, end_node_id=node, max_duration=secs)
    wait_task.start_after = start_after
    wait_task.end_before = wait_task.start_after + window_size
    # Arguments consumed by the wait action server: absolute time + duration.
    task_utils.add_time_argument(wait_task, rospy.Time())
    task_utils.add_duration_argument(wait_task, secs)
    return wait_task
# Demo entry point: queue wait tasks at WayPoint1..10 plus one
# time-critical (zero-window) task, then start the executor.
if __name__ == '__main__':
    rospy.init_node("example_multi_add_client")
    # get services to call into execution framework
    add_task, set_execution_status = get_services()
    node_count = 10
    nodes = ['WayPoint%s' % node for node in range(1, node_count + 1)]
    # NOTE(review): map() returning a list that supports .append is
    # Python 2 behavior; under Python 3 this would need list(map(...)).
    tasks = map(create_wait_task, nodes)
    # Add a time-critical task, i.e. one with a zero-sized window
    tasks.append(create_wait_task('ChargingPoint',
        start_after=rospy.get_rostime() + rospy.Duration(160),
        window_size=rospy.Duration(0)))
    task_id = add_task(tasks)
    # Set the task executor running (if it isn't already)
    resp = set_execution_status(True)
    # now let's stop execution while it's going on
    # rospy.sleep(4)
    # resp = set_execution_status(False)
    # rospy.loginfo('Success: %s' % resp.success)
    # rospy.loginfo('Wait: %s' % resp.remaining_execution_time)
    # # and start again
    # rospy.sleep(2)
    # set_execution_status(True)
|
centaurialpha/edis | src/ui/dialogs/file_properties.py | Python | gpl-3.0 | 3,225 | 0.000932 | # -*- coding: utf-8 -*-
# EDIS - a simple cross-platform IDE for C
#
# This file is part of Edis
# Copyright 2014-2015 - Gabriel Acosta <acostadariogabriel at gmail>
# License: GPLv3 (see http://www.gnu.org/licenses/gpl.html)
import re
import os
from datetime import datetime
from PyQt4.QtGui import (
QDialog,
QVBoxLayout,
QGridLayout,
QLabel,
QPushButton
)
from PyQt4.QtCore import (
Qt,
QFile
)
class FileProperty(QDialog):
    """Modal dialog showing properties (type, size, path, line counts,
    modification time) of the file open in *editor*.  UI strings are
    Spanish and routed through self.tr() for translation."""
    def __init__(self, editor, parent=None):
        QDialog.__init__(self, parent, Qt.Dialog)
        self.setWindowTitle(self.tr("Propiedades del Archivo"))
        filename = editor.filename
        vLayout = QVBoxLayout(self)
        vLayout.setContentsMargins(10, 15, 10, 10)
        vLayout.setSpacing(10)
        # Header: just the base name, large and bold.
        lbl_title = QLabel(filename.split('/')[-1])
        lbl_title.setStyleSheet("font-weight: bold; font-size: 24px;")
        vLayout.addWidget(lbl_title)
        grid = QGridLayout()
        grid.addWidget(QLabel(self.tr("<b>Tipo:</b>")), 1, 0)
        grid.addWidget(QLabel(self.get_type(filename)), 1, 1)
        grid.addWidget(QLabel(self.tr("<b>Tamaño:</b>")), 2, 0)
        grid.addWidget(QLabel(self.get_size(filename)), 2, 1)
        grid.addWidget(QLabel(self.tr("<b>Ubicación:</b>")), 3, 0)
        grid.addWidget(QLabel(filename), 3, 1)
        grid.addWidget(QLabel(self.tr("<b>Líneas de código:</b>")), 4, 0)
        grid.addWidget(QLabel(self.tr("{0}").format(editor.lines() -
                       len(self.get_comment_spaces(editor)))), 4, 1)
        grid.addWidget(QLabel(
            self.tr("<b>Comentarios y líneas en blanco:</b>")), 5, 0)
        grid.addWidget(QLabel(
            self.tr("{0}").format(len(self.get_comment_spaces(editor)))), 5, 1)
        grid.addWidget(QLabel(self.tr("<b>Total de líneas:</b>")), 6, 0)
        grid.addWidget(QLabel(str(editor.lines())), 6, 1)
        grid.addWidget(QLabel(self.tr("<b>Modificado:</b>")), 7, 0)
        grid.addWidget(QLabel(self.tr(self.get_modification(filename))), 7, 1)
        btn_aceptar = QPushButton(self.tr("Aceptar"))
        grid.addWidget(btn_aceptar, 8, 1, Qt.AlignRight)
        vLayout.addLayout(grid)
        btn_aceptar.clicked.connect(self.close)
    def get_type(self, filename):
        """Map the file extension to a human-readable description."""
        try:
            ext = filename.split('.')[-1]
            if ext == 'c':
                type_ = self.tr("Archivo fuente C")
            elif ext == 'h':
                type_ = self.tr("Archivo de Cabecera")
            elif ext == 's':
                type_ = self.tr("ASM")
            return type_
        except:
            # Unknown extension: fall back to the uppercased extension.
            return filename.split('.')[-1].upper()
    def get_size(self, filename):
        """File size, presumably in KiB (bytes/1024, rounded up via +1023);
        returned as a bare number string with no unit -- TODO confirm."""
        size = (float(QFile(filename).size() + 1023.0) / 1024.0)
        return str(size)
    def get_comment_spaces(self, editor):
        """Return regex matches for blank lines and //-comment lines."""
        spaces = re.findall('(^\n)|(^(\s+)?//)|(^( +)?($|\n))',
                            editor.text(), re.M)
        return spaces
    def get_modification(self, filename):
        """Last-modification timestamp as 'YYYY-MM-DD HH:MM', or '-'.

        NOTE(review): the bare except also hides unexpected errors, not
        just missing files.
        """
        try:
            time = os.path.getmtime(filename)
            format_time = datetime.fromtimestamp(
                time).strftime("%Y-%m-%d %H:%M")
            return format_time
        except:
            return "-"
|
AdamWill/bodhi | bodhi/tests/server/test_metadata.py | Python | gpl-2.0 | 16,797 | 0.001072 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import glob
import shutil
import tempfile
import unittest
from datetime import datetime
from hashlib import sha256
from os.path import join, exists, basename
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from zope.sqlalchemy impor | t ZopeTransactionExtension
import createrepo_c
from bodhi.server import log
from bodhi.server.config import config
from bodhi.server.util import mkmetad | atadir
from bodhi.server.models import (Package, Update, Build, Base,
UpdateRequest, UpdateStatus, UpdateType)
from bodhi.server.buildsys import get_session, DevBuildsys
from bodhi.server.metadata import ExtendedMetadata
from bodhi.tests.server.functional.base import DB_PATH
from bodhi.tests.server import populate
class TestExtendedMetadata(unittest.TestCase):
    def __init__(self, *args, **kw):
        """Ensure the f17-updates-testing mash directory exists before tests run."""
        super(TestExtendedMetadata, self).__init__(*args, **kw)
        repo_path = os.path.join(config.get('mash_dir'), 'f17-updates-testing')
        if not os.path.exists(repo_path):
            os.makedirs(repo_path)
    def setUp(self):
        """Create a fresh in-test database, populate it, and lay out a
        temporary repo tree with stub repodata plus a fake koji RPM."""
        engine = create_engine(DB_PATH)
        Session = scoped_session(sessionmaker(extension=ZopeTransactionExtension(keep_session=True)))
        Session.configure(bind=engine)
        log.debug('Creating all models for %s' % engine)
        Base.metadata.bind = engine
        Base.metadata.create_all(engine)
        self.db = Session()
        populate(self.db)
        # Initialize our temporary repo
        self.tempdir = tempfile.mkdtemp('bodhi')
        self.temprepo = join(self.tempdir, 'f17-updates-testing')
        mkmetadatadir(join(self.temprepo, 'f17-updates-testing', 'i386'))
        self.repodata = join(self.temprepo, 'f17-updates-testing', 'i386', 'repodata')
        assert exists(join(self.repodata, 'repomd.xml'))
        # Stub build returned by the dev buildsystem for metadata generation.
        DevBuildsys.__rpms__ = [{
            'arch': 'src',
            'build_id': 6475,
            'buildroot_id': 1883,
            'buildtime': 1178868422,
            'epoch': None,
            'id': 62330,
            'name': 'bodhi',
            'nvr': 'bodhi-2.0-1.fc17',
            'release': '1.fc17',
            'size': 761742,
            'version': '2.0'
        }]
    def tearDown(self):
        """Close the DB session, reset the buildsystem stub, and remove the
        temporary repo tree."""
        self.db.close()
        get_session().clear()
        shutil.rmtree(self.tempdir)
    def _verify_updateinfo(self, repodata):
        """Assert exactly one updateinfo file exists in *repodata* and that
        its filename hash prefix matches its sha256; return its path."""
        updateinfos = glob.glob(join(repodata, "*-updateinfo.xml*"))
        assert len(updateinfos) == 1, "We generated %d updateinfo metadata" % len(updateinfos)
        updateinfo = updateinfos[0]
        # createrepo names the file "<sha256>-updateinfo.xml...".
        hash = basename(updateinfo).split("-", 1)[0]
        hashed = sha256(open(updateinfo).read()).hexdigest()
        assert hash == hashed, "File: %s\nHash: %s" % (basename(updateinfo), hashed)
        return updateinfo
def get_notice(self, uinfo, title):
for record in uinfo.updates:
if record.title == title:
return record
    def test_extended_metadata(self):
        """Generate updateinfo for a testing update and verify every field
        of the resulting createrepo_c notice (refs, collection, package)."""
        update = self.db.query(Update).one()
        # Pretend it's pushed to testing
        update.status = UpdateStatus.testing
        update.request = None
        update.date_pushed = datetime.utcnow()
        DevBuildsys.__tagged__[update.title] = ['f17-updates-testing']
        # Generate the XML
        md = ExtendedMetadata(update.release, update.request, self.db, self.temprepo)
        # Insert the updateinfo.xml into the repository
        md.insert_updateinfo()
        updateinfo = self._verify_updateinfo(self.repodata)
        # Read an verify the updateinfo.xml.gz
        uinfo = createrepo_c.UpdateInfo(updateinfo)
        # A notice for an unrelated title must not appear.
        notice = self.get_notice(uinfo, 'mutt-1.5.14-1.fc13')
        self.assertIsNone(notice)
        self.assertEquals(len(uinfo.updates), 1)
        notice = uinfo.updates[0]
        self.assertIsNotNone(notice)
        self.assertEquals(notice.title, update.title)
        self.assertEquals(notice.release, update.release.long_name)
        self.assertEquals(notice.status, update.status.value)
        if update.date_modified:
            self.assertEquals(notice.updated_date, update.date_modified)
        self.assertEquals(notice.fromstr, config.get('bodhi_email'))
        self.assertEquals(notice.rights, config.get('updateinfo_rights'))
        self.assertEquals(notice.description, update.notes)
        #self.assertIsNotNone(notice.issued_date)
        self.assertEquals(notice.id, update.alias)
        # References: first the bug, then the CVE.
        bug = notice.references[0]
        self.assertEquals(bug.href, update.bugs[0].url)
        self.assertEquals(bug.id, '12345')
        self.assertEquals(bug.type, 'bugzilla')
        cve = notice.references[1]
        self.assertEquals(cve.type, 'cve')
        self.assertEquals(cve.href, update.cves[0].url)
        self.assertEquals(cve.id, update.cves[0].cve_id)
        # Collection/package details for the release.
        col = notice.collections[0]
        self.assertEquals(col.name, update.release.long_name)
        self.assertEquals(col.shortname, update.release.name)
        pkg = col.packages[0]
        self.assertEquals(pkg.epoch, '0')
        self.assertEquals(pkg.name, 'TurboGears')
        self.assertEquals(pkg.src, 'https://download.fedoraproject.org/pub/fedora/linux/updates/testing/17/SRPMS/T/TurboGears-1.0.2.2-2.fc7.src.rpm')
        self.assertEquals(pkg.version, '1.0.2.2')
        self.assertFalse(pkg.reboot_suggested)
        self.assertEquals(pkg.arch, 'src')
        self.assertEquals(pkg.filename, 'TurboGears-1.0.2.2-2.fc7.src.rpm')
def test_extended_metadata_updating(self):
update = self.db.query(Update).one()
# Pretend it's pushed to testing
update.status = UpdateStatus.testing
update.request = None
update.date_pushed = datetime.utcnow()
DevBuildsys.__tagged__[update.title] = ['f17-updates-testing']
# Generate the XML
md = ExtendedMetadata(update.release, update.request, self.db, self.temprepo)
# Insert the updateinfo.xml into the repository
md.insert_updateinfo()
md.cache_repodata()
updateinfo = self._verify_updateinfo(self.repodata)
# Read an verify the updateinfo.xml.gz
uinfo = createrepo_c.UpdateInfo(updateinfo)
notice = self.get_notice(uinfo, update.title)
self.assertIsNotNone(notice)
self.assertEquals(notice.title, update.title)
self.assertEquals(notice.release, update.release.long_name)
self.assertEquals(notice.status, update.status.value)
self.assertEquals(notice.updated_date, update.date_modified)
self.assertEquals(notice.fromstr, config.get('bodhi_email'))
self.assertEquals(notice.description, update.notes)
#self.assertIsNotNone(notice.issued_date)
self.assertEquals(notice.id, update.alias)
#self.assertIsNone(notice.epoch)
bug = notice.references[0]
url = update.bugs[0].url
self.assertEquals(bug.href, url)
self.assertEquals(bug.id, '12345')
self.assertEquals(bug.type, 'bugzilla')
cve = notice.references[1]
self.assertEquals(cve.type, 'cve')
self.assertEquals(cve.href, update.cves[0].url)
self.assertEquals(cve.id, update.cves[0].cve_id)
# Change the notes on the update, but not the date_modified, so we can
# ensure that the notice came from the cache
update.notes = u'x'
# Re-initialize our temporary repo
shutil.rmtree(self.temprepo)
os.mkdir(self.temprepo)
|
rwl/openpowersystem | rdflib/util.py | Python | agpl-3.0 | 7,101 | 0.006337 | from rdflib.URIRef import URIRef
from rdflib.BNode import BNode
from rdflib.Literal import Literal
from rdflib.Variable import Variable
from rdflib.Graph import Graph, QuotedGraph
from rdflib.Statement import Statement
from rdflib.exceptions import SubjectTypeError, PredicateTypeError, ObjectTypeError, ContextTypeError
from rdflib.compat import rsplit
from cPickle import loads
def list2set(seq):
    """Return the items of *seq* with duplicates removed, preserving the
    order of first occurrence."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def first(seq):
    """Return the first item of *seq*, or None if it is empty."""
    return next(iter(seq), None)
def uniq(sequence, strip=0):
    """removes duplicate strings from the sequence.

    Returns the deduplicated strings as dict keys (``.keys()``), so the
    result type follows the running Python version.  If *strip* is truthy,
    values are whitespace-stripped before deduplication.

    Bug fix: the original drove ``dict.__setitem__`` through ``map()`` for
    its side effects; under Python 3 ``map`` is lazy, so nothing was ever
    inserted and an empty result was returned.  Plain loops behave
    identically on both Python 2 and 3.
    """
    seen = {}
    if strip:
        for val in sequence:
            seen[val.strip()] = None
    else:
        for val in sequence:
            seen[val] = None
    return seen.keys()
def more_than(sequence, number):
    "Returns 1 if sequence has more items than number and 0 if not."
    # Counts lazily and stops as soon as the threshold is exceeded, so it
    # is safe on arbitrary (even very long) iterables.
    seen = 0
    for _ in sequence:
        seen += 1
        if seen > number:
            return 1
    return 0
def term(str, default=None):
    """Parse an N3-style token into an RDF term (see also from_n3).

    "<...>" becomes a URIRef, '"..."' a Literal, "_"-prefixed a BNode;
    empty input yields *default*; anything else raises.
    """
    if not str:
        return default
    if str.startswith("<") and str.endswith(">"):
        return URIRef(str[1:-1])
    if str.startswith('"') and str.endswith('"'):
        return Literal(str[1:-1])
    if str.startswith("_"):
        return BNode(str)
    raise Exception("Unknown Term Syntax: '%s'" % str)
from time import mktime, time, gmtime, localtime, timezone, altzone, daylight
def date_time(t=None, local_time_zone=False):
    """http://www.w3.org/TR/NOTE-datetime ex: 1997-07-16T19:20:30Z
    >>> date_time(1126482850)
    '2005-09-11T23:54:10Z'
    @@ this will change depending on where it is run
    #>>> date_time(1126482850, local_time_zone=True)
    #'2005-09-11T19:54:10-04:00'
    >>> date_time(1)
    '1970-01-01T00:00:01Z'
    >>> date_time(0)
    '1970-01-01T00:00:00Z'
    """
    if t is None:
        t = time()
    if local_time_zone:
        time_tuple = localtime(t)
        # Pick the DST or standard offset depending on the tm_isdst flag.
        tz_mins = (altzone if time_tuple[8] else timezone) // 60
        tzd = "-%02d:%02d" % (tz_mins // 60, tz_mins % 60)
    else:
        time_tuple = gmtime(t)
        tzd = "Z"
    year, month, day, hh, mm, ss = time_tuple[:6]
    return "%04d-%02d-%02dT%02d:%02d:%02d%s" % (
        year, month, day, hh, mm, ss, tzd)
def parse_date_time(val):
    """always returns seconds in UTC

    # tests are written like this to make any errors easier to understand
    >>> parse_date_time('2005-09-11T23:54:10Z') - 1126482850.0
    0.0
    >>> parse_date_time('2005-09-11T16:54:10-07:00') - 1126482850.0
    0.0
    >>> parse_date_time('1970-01-01T00:00:01Z') - 1.0
    0.0
    >>> parse_date_time('1970-01-01T00:00:00Z') - 0.0
    0.0
    >>> parse_date_time("2005-09-05T10:42:00") - 1125916920.0
    0.0
    """
    # A bare date gets midnight UTC appended.
    if "T" not in val:
        val += "T00:00:00Z"
    ymd, time_part = val.split("T")
    hms, tz_str = time_part[0:8], time_part[8:]
    if not tz_str or tz_str == "Z":
        tz_offset = 0
    else:
        # tz_str looks like "+HH:MM" / "-HH:MM".
        signed_hrs = int(tz_str[:3])
        mins = int(tz_str[4:6])
        # sign(signed_hrs); replaces Python 2's cmp(signed_hrs, 0), which
        # raised NameError under Python 3.
        sign = (signed_hrs > 0) - (signed_hrs < 0)
        secs = (sign * mins + signed_hrs * 60) * 60
        tz_offset = -secs
    year, month, day = ymd.split("-")
    hour, minute, second = hms.split(":")
    # mktime interprets the tuple as local time; subtracting `timezone`
    # (local standard offset) normalizes the result to UTC seconds.
    t = mktime((int(year), int(month), int(day), int(hour),
                int(minute), int(second), 0, 0, 0))
    return t - timezone + tz_offset
def from_n3(s, default=None, backend=None):
    """ Creates the Identifier corresponding to the given n3 string. WARNING: untested, may contain bugs. TODO: add test cases."""
    # NOTE(review): Python 2 code -- relies on rdflib.compat.rsplit and on
    # str.decode("unicode-escape"), neither of which works under Python 3.
    if not s:
        return default
    if s.startswith('<'):
        return URIRef(s[1:-1])
    elif s.startswith('"'):
        # TODO: would a regex be faster?
        value, rest = rsplit(s, '"', 1)
        value = value[1:]  # strip leading quote
        if rest.startswith("@"):
            if "^^" in rest:
                # Language tag followed by a datatype marker.
                language, rest = rsplit(rest, '^^', 1)
                language = language[1:]  # strip leading at sign
            else:
                language = rest[1:]  # strip leading at sign
                rest = ''
        else:
            language = None
        if rest.startswith("^^"):
            datatype = rest[3:-1]
        else:
            datatype = None
        # Undo N3 escaping before constructing the Literal.
        value = value.replace('\\"', '"').replace('\\\\', '\\').decode("unicode-escape")
        return Literal(value, language, datatype)
    elif s.startswith('{'):
        # Formula / quoted graph.
        identifier = from_n3(s[1:-1])
        return QuotedGraph(backend, identifier)
    elif s.startswith('['):
        identifier = from_n3(s[1:-1])
        return Graph(backend, identifier)
    else:
        if s.startswith("_:"):
            return BNode(s[2:])
        else:
            return BNode(s)
def check_context(c):
    """Raise ContextTypeError unless *c* is a URIRef or BNode."""
    if not isinstance(c, (URIRef, BNode)):
        raise ContextTypeError("%s:%s" % (c, type(c)))
def check_subject(s):
    """ Test that s is a valid subject identifier."""
    if not isinstance(s, (URIRef, BNode)):
        raise SubjectTypeError(s)
def check_predicate(p):
    """ Test that p is a valid predicate identifier."""
    # Predicates must be URIRefs; literals and blank nodes are invalid.
    if isinstance(p, URIRef):
        return
    raise PredicateTypeError(p)
def check_object(o):
""" Test that o is a valid object identifier."""
if not (isinstance(o, URIRef) or \
isinstance(o, Literal) or \
isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_statement((s, p, o)):
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
if not (isinstance(o, URIRef) or \
isinstance(o, Literal) or \
isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_pattern((s, p, o)):
if s and not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
if o and not (isinstance(o, URIRef) or \
isinstance(o, Literal) or \
isinstance(o, BNode)):
raise ObjectTypeError(o)
def graph_to_dot(graph, dot):
""" Turns graph into dot (graphviz graph drawing format) using pydot. """
import pydot
nodes = {}
for s, o in graph.subject_objects():
for i in s,o:
if i not in nodes.keys():
nodes[i] = i
for s, p, o in graph.triples((None,None,None)):
dot.add_edge(pydot.Edge(nodes[s], nodes[o], label=p))
if __name__ == "__main__":
# try to make the tests work outside of the time zone they were written in
#import os, time
#os.environ['TZ'] = 'US/Pacific'
#try:
# time.tzset()
#except AttributeError, e:
# print e
#pass
# tzset missing! see
# http://mail.python.org/pipermail/python-dev/2003-April/034480.html
import doctest
doctest.testmod()
|
suyashphadtare/vestasi-erp-jan-end | erpnext/stock/doctype/material_request/material_request.py | Python | agpl-3.0 | 13,650 | 0.024469 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.buying_controller import BuyingController
form_grid_templates = {
"indent_details": "templates/form_grid/material_request_grid.html"
}
class MaterialRequest(BuyingController):
tname = 'Material Request Item'
fname = 'indent_details'
def check_if_already_pulled(self):
pass#if self.[d.sales_order_no for d in self.get('inde | nt_details')]
def validate_qty_against_so(self):
so_items = {} # Format --> {'SO/00001': {'Item/001': 120, 'Item/002': 24}}
for d in self.get('indent_details'):
if d.sales_order_no:
if not so_items.has_key(d.sales_order_no):
so_items[d.sales_order_no] = {d.item_code: flt(d.qty)}
else:
| if not so_items[d.sales_order_no].has_key(d.item_code):
so_items[d.sales_order_no][d.item_code] = flt(d.qty)
else:
so_items[d.sales_order_no][d.item_code] += flt(d.qty)
for so_no in so_items.keys():
for item in so_items[so_no].keys():
already_indented = frappe.db.sql("""select sum(ifnull(qty, 0))
from `tabMaterial Request Item`
where item_code = %s and sales_order_no = %s and
docstatus = 1 and parent != %s""", (item, so_no, self.name))
already_indented = already_indented and flt(already_indented[0][0]) or 0
actual_so_qty = frappe.db.sql("""select sum(ifnull(qty, 0)) from `tabSales Order Item`
where parent = %s and item_code = %s and docstatus = 1""", (so_no, item))
actual_so_qty = actual_so_qty and flt(actual_so_qty[0][0]) or 0
if actual_so_qty and (flt(so_items[so_no][item]) + already_indented > actual_so_qty):
frappe.throw(_("Material Request of maximum {0} can be made for Item {1} against Sales Order {2}").format(actual_so_qty - already_indented, item, so_no))
def validate_schedule_date(self):
for d in self.get('indent_details'):
if d.schedule_date and d.schedule_date < self.transaction_date:
frappe.throw(_("Expected Date cannot be before Material Request Date"))
# Validate
# ---------------------
def validate(self):
super(MaterialRequest, self).validate()
self.validate_schedule_date()
self.validate_uom_is_integer("uom", "qty")
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped", "Cancelled"])
self.validate_value("material_request_type", "in", ["Purchase", "Transfer"])
pc_obj = frappe.get_doc('Purchase Common')
pc_obj.validate_for_items(self)
# self.validate_qty_against_so()
# NOTE: Since Item BOM and FG quantities are combined, using current data, it cannot be validated
# Though the creation of Material Request from a Production Plan can be rethought to fix this
def update_bin(self, is_submit, is_stopped):
""" Update Quantity Requested for Purchase in Bin for Material Request of type 'Purchase'"""
from erpnext.stock.utils import update_bin
for d in self.get('indent_details'):
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes":
if not d.warehouse:
frappe.throw(_("Warehouse required for stock Item {0}").format(d.item_code))
qty =flt(d.qty)
if is_stopped:
qty = (d.qty > d.ordered_qty) and flt(flt(d.qty) - flt(d.ordered_qty)) or 0
args = {
"item_code": d.item_code,
"warehouse": d.warehouse,
"indented_qty": (is_submit and 1 or -1) * flt(qty),
"posting_date": self.transaction_date
}
update_bin(args)
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
# self.update_requested_qty()
self.update_bin(is_submit = 1, is_stopped = 0)
def check_modified_date(self):
mod_db = frappe.db.sql("""select modified from `tabMaterial Request` where name = %s""",
self.name)
date_diff = frappe.db.sql("""select TIMEDIFF('%s', '%s')"""
% (mod_db[0][0], cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(_(self.doctype), self.name))
def update_status(self, status):
self.check_modified_date()
# self.update_requested_qty()
self.update_bin(is_submit = (status == 'Submitted') and 1 or 0, is_stopped = 1)
frappe.db.set(self, 'status', cstr(status))
frappe.msgprint(_("Status updated to {0}").format(_(status)))
def on_cancel(self):
# Step 1:=> Get Purchase Common Obj
pc_obj = frappe.get_doc('Purchase Common')
# Step 2:=> Check for stopped status
pc_obj.check_for_stopped_status(self.doctype, self.name)
# Step 3:=> Check if Purchase Order has been submitted against current Material Request
pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Order', docname = self.name, detail_doctype = 'Purchase Order Item')
# Step 4:=> Update Bin
self.update_bin(is_submit = 0, is_stopped = (cstr(self.status) == 'Stopped') and 1 or 0)
# Step 5:=> Set Status
frappe.db.set(self,'status','Cancelled')
# self.update_requested_qty()
def update_completed_qty(self, mr_items=None):
if self.material_request_type != "Transfer":
return
item_doclist = self.get("indent_details")
if not mr_items:
mr_items = [d.name for d in item_doclist]
per_ordered = 0.0
for d in item_doclist:
if d.name in mr_items:
d.ordered_qty = flt(frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail` where material_request = %s
and material_request_item = %s and docstatus = 1""",
(self.name, d.name))[0][0])
frappe.db.set_value(d.doctype, d.name, "ordered_qty", d.ordered_qty)
# note: if qty is 0, its row is still counted in len(item_doclist)
# hence adding 1 to per_ordered
if (d.ordered_qty > d.qty) or not d.qty:
per_ordered += 1.0
elif d.qty > 0:
per_ordered += flt(d.ordered_qty / flt(d.qty))
self.per_ordered = flt((per_ordered / flt(len(item_doclist))) * 100.0, 2)
frappe.db.set_value(self.doctype, self.name, "per_ordered", self.per_ordered)
#newly added update_requested_qty method on 23rd march 2015
def update_requested_qty(self, mr_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
from erpnext.stock.utils import get_bin
def _update_requested_qty(item_code, warehouse):
requested_qty = frappe.db.sql("""select sum(mr_item.qty - ifnull(mr_item.ordered_qty, 0))
from `tabMaterial Request Item` mr_item, `tabMaterial Request` mr
where mr_item.item_code=%s and mr_item.warehouse=%s
and mr_item.qty > ifnull(mr_item.ordered_qty, 0) and mr_item.parent=mr.name
and mr.status!='Stopped' and mr.docstatus=1""", (item_code, warehouse))
bin_doc = get_bin(item_code, warehouse)
bin_doc.indented_qty = flt(requested_qty[0][0]) if requested_qty else 0
bin_doc.save()
item_wh_list = []
for d in self.get("indent_details"):
if (not mr_item_rows or d.name in mr_item_rows) and [d.item_code, d.warehouse] not in item_wh_list \
and frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes" and d.warehouse:
item_wh_list.append([d.item_code, d.warehouse])
for item_code, warehouse in item_wh_list:
_update_requested_qty(item_code, warehouse)
def update_completed_qty(doc, method):
if doc.doctype == "Stock Entry":
material_request_map = {}
for d in doc.get("mtn_details"):
if d.material_request:
material_request_map.setdefault(d.material_request, []).append(d.material_request_item)
for mr_name, mr_items in material_request_map.items():
mr_obj = frappe.get_doc("Material Request", mr_name)
if mr_obj.status in ["Stopped", "Cancelled"]:
frappe.throw(_("Material Request {0} is cancelled or stopped").format(mr_obj.name),
frappe.InvalidStatusError)
_update_requested_qty(doc, mr_obj, mr_items)
# update ordered percentage and qty
mr_obj.update_completed_qty(mr_items)
# def _update_requested_qty(doc, mr_obj, mr_items):
# """update requested q |
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/auth/user/User.py | Python | bsd-3-clause | 673 | 0.002972 | ##
##
# File auto-generated agains | t equivalent DynamicSerialize Java class
from dynamicserialize.dstypes.com.raytheon.uf.common.auth.user import UserId
class User(object):
def __init__(self, userId=None):
if userId is None:
self.userId = UserId.UserId()
else:
| self.userId = userId
self.authenticationData = None
def getUserId(self):
return self.userId
def setUserId(self, userId):
self.userId = userId
def getAuthenticationData(self):
return self.authenticationData
def setAuthenticationData(self, authenticationData):
self.authenticationData = authenticationData
|
ava-project/AVA | ava/input/KeyManager.py | Python | mit | 1,876 | 0.001066 | import io
import sys
import threading
import wave
from pynput import keyboard
from pynput.keyboard import Key, Controller
from .RawInput import RawInput
from ..components import _BaseComponent
class KeyManager:
def __init__(self, queues):
self.activated = False
self.listener = None
self.input_listener = RawInput()
self.activated = False
self.input_queue = queues
def write_to_file(self, all_datas):
audio_file = io.BytesIO()
wf = wave.Wave_write(audio_file)
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(16000)
wf.writeframes(b''.join(all_datas))
audio_file.seek(0)
self.input_queue.put(audio_file)
def on_press(self, key):
try:
if key == Key.ctrl_l and not self.activated:
self.activated = True
print(
"Voice recognition activated ! Release when you are done..."
)
self.input_listener.reading_thread = threading.Thread(
target=self.input_listener.read)
self.input_listener.reading_thread.start()
except AttributeError:
print("Error on Key pressed")
pass
def on_release(self, key):
if | self.activated:
self.activated = False
self.input_listener.stop()
print("Voice recognition stopped !")
while self.input_listener.done == False:
pass
self.write_to_file(self.input_listener.record)
def run(self):
with keyboard.Listener(
on_p | ress=self.on_press,
on_release=self.on_release) as self.listener:
self.listener.join()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self.listener.stop()
|
openstack-dev/bashate | bashate/messages.py | Python | apache-2.0 | 7,248 | 0.000138 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import textwrap
class _Message(object):
"""An individual bashate message.
This should be accessed via the MESSAGES dict keyed by msg_id,
e.g.
from bashate.messages import MESSAGES
print(MESSAGES['E123'].msg)
:param msg_id: The unique message id (E...)
:param msg_str: The short message string, as displayed in program
output
:param long_msg: A longer more involved message, designed for
documentation
"""
def __init__(self, msg_id, msg_str, long_msg, default):
self.msg_id = msg_id
self.msg_str = msg_str
# clean-up from """ to a plain string
if long_msg:
self.long_msg = textwrap.dedent(long_msg)
self.long_msg = self.long_msg.strip()
else:
self.long_msg = None
self.default = default
@property
def msg(self):
# For historical reasons, the code relies on "id: msg" so build
# that up as .msg | property for quick access.
return "%s: %s" % (self.msg_id, self.msg_str)
_messages = {
'E001': {
'msg': 'Trailing Whitespace',
'long_msg': None,
'default': 'E'
},
'E002': {
'msg': 'Tab indents',
'long_msg':
"""
| Spaces are preferred to tabs in source files.
""",
'default': 'E'
},
'E003': {
'msg': 'Indent not multiple of 4',
'long_msg':
"""
Four spaces should be used to offset logical blocks.
""",
'default': 'E'
},
'E004': {
'msg': 'File did not end with a newline',
'long_msg':
"""
It is conventional to have a single newline ending files.
""",
'default': 'E'
},
'E005': {
'msg': 'File does not begin with #! or have .sh prefix',
'long_msg':
"""
This can be useful for tools that use either the interpreter
directive or the file-exension to select highlighting mode,
syntax mode or determine MIME-type, such as file, gerrit and
editors.
""",
'default': 'W'
},
'E006': {
'msg': 'Line too long',
'long_msg':
"""
This check mimics the widely accepted convention from PEP8 and
many other places that lines longer than a standard terminal width
(default=79 columns) can not only cause problems when reading/writing
code, but also often indicates a bad smell, e.g. too many levels
of indentation due to overly complex functions which require
refactoring into smaller chunks.
""",
'default': 'W'
},
'E010': {
'msg': 'The "do" should be on same line as %s',
'long_msg':
"""
Ensure consistency of "do" directive being on the same line as
it's command. For example:
for i in $(seq 1 100);
do
echo "hi"
done
will trigger this error
""",
'default': 'E'
},
'E011': {
'msg': 'Then keyword is not on same line as if or elif keyword',
'long_msg':
"""
Similar to E010, this ensures consistency of if/elif statements
""",
'default': 'E'
},
'E012': {
'msg': 'here-document at line %d delimited by end-of-file',
'long_msg':
"""
This check ensures the closure of heredocs (<<EOF directives).
Bash will warn when a heredoc is delimited by end-of-file, but
it is easily missed and can cause unexpected issues when a
file is sourced.
""",
'default': 'E'
},
'E020': {
'msg': 'Function declaration not in format ^function name {$',
'long_msg':
"""
There are several equivalent ways to define functions in Bash.
This check is for consistency.
""",
'default': 'E'
},
'E040': {
'msg': 'Syntax error',
'long_msg':
"""
`bash -n` determined that there was a syntax error preventing
the script from parsing correctly and running.
""",
'default': 'E'
},
'E041': {
'msg': 'Arithmetic expansion using $[ is deprecated for $((',
'long_msg':
"""
$[ is deprecated and not explained in the Bash manual. $((
should be used for arithmetic.
""",
'default': 'E'
},
'E042': {
'msg': 'local declaration hides errors',
'long_msg':
"""
The return value of "local" is always 0; errors in subshells
used for declaration are thus hidden and will not trigger "set -e".
""",
'default': 'W',
},
'E043': {
'msg': 'Arithmetic compound has inconsistent return semantics',
'long_msg':
"""
The return value of ((expr)) is 1 if "expr" evalues to zero,
otherwise 0. Combined with "set -e", this can be quite
confusing when something like ((counter++)) evaluates to zero,
making the arithmetic evaluation return 1 and triggering the
an error failure. It is therefore best to use assignment with
the $(( operator.
""",
'default': 'W',
},
'E044': {
'msg': 'Use [[ for non-POSIX comparisions',
'long_msg':
"""
[ is the POSIX test operator, while [[ is the bash keyword
comparision operator. Comparisons such as =~, < and > require
the use of [[.
""",
'default': 'E',
},
}
MESSAGES = {}
_default_errors = []
_default_warnings = []
for k, v in _messages.items():
MESSAGES[k] = _Message(k, v['msg'], v['long_msg'], v['default'])
if v['default'] == 'E':
_default_errors.append(k)
if v['default'] == 'W':
_default_warnings.append(k)
# convert this to the regex strings. This looks a bit weird
# but it fits the current model of error/warning/ignore checking
# easily.
_default_errors = '^(' + '|'.join(_default_errors) + ')'
_default_warnings = '^(' + '|'.join(_default_warnings) + ')'
def is_default_error(error):
return re.search(_default_errors, error)
def is_default_warning(error):
return re.search(_default_warnings, error)
def print_messages():
print("\nAvailable bashate checks")
print("------------------------\n")
for k, v in MESSAGES.items():
print(" [%(default)s] %(id)s : %(string)s" % {
'default': v.default,
'id': v.msg_id,
'string': v.msg_str})
if v.long_msg:
for l in v.long_msg.split('\n'):
print(" %s" % l)
print("")
|
cheshirekow/codebase | third_party/lcm/test/python/bool_test.py | Python | gpl-3.0 | 1,778 | 0.001687 | #!/usr/bin/python
import unittest
import lcmtest
class TestBools(unittest.TestCase):
def test | _bool(self):
"""Encode a bools_t message, then verify that it decodes correctly.
Also check that the decoded fields are all of type bool.
"""
msg = lcmtest.bools_t()
msg.one_bool = True
msg.fixed_array = [False, True, False]
msg.num_a = 3
msg.num_b = 2
for a_index in xrange(msg.num_a):
inner_list = []
for b_index in xrange(msg.num_b):
inner_list.append(bool(b_index % | 2))
msg.two_dim_array.append(inner_list)
msg.one_dim_array.append(bool((a_index + 1) % 2))
data = msg.encode()
decoded = lcmtest.bools_t.decode(data)
self.assertEqual(msg.one_bool, decoded.one_bool)
self.assertEqual(list(msg.fixed_array), list(decoded.fixed_array))
self.assertEqual(msg.num_a, decoded.num_a)
self.assertEqual(msg.num_b, decoded.num_b)
self.assertEqual(bool, type(decoded.one_bool))
self.assertTrue(all([type(elem) == bool
for elem in decoded.fixed_array]))
for sublist in decoded.two_dim_array:
self.assertTrue(all([type(elem) == bool for elem in sublist]))
self.assertTrue(all([type(elem) == bool
for elem in decoded.one_dim_array]))
for a_index in xrange(msg.num_a):
for b_index in xrange(msg.num_b):
self.assertEqual(msg.two_dim_array[a_index][b_index],
decoded.two_dim_array[a_index][b_index])
self.assertEqual(msg.one_dim_array[a_index],
decoded.one_dim_array[a_index])
if __name__ == '__main__':
unittest.main()
|
portableant/open-context-py | opencontext_py/apps/edit/inputs/fieldgroups/models.py | Python | gpl-3.0 | 2,043 | 0.002447 | import collections
from jsonfield import JSONField
from datetime import datetime
from django.utils import timezone
from django.db import models
# Stores information about fields for a data entry form
class InputFieldGroup(models.Model):
GROUP_VIS = {'open': 'Show Field Group in an open panel',
'closed': 'Show Field Group in a closed panel',
'hidden': 'Field Group is not shown to the user until certain conditions are met'}
uuid = models.CharField(max_length=50, primary_key=True) # uuid for the rule itself
project_uuid = models.CharField(max_length=50, db_index=True)
profile_uuid = models.CharField(max_length=50, db_index=True) # uuid for the input profile
label = models.CharField(max_length=200) # label for data entry form
sort = models.IntegerField() # sort value a field in a given cell
visibility = models.CharField(max_length=50) # label for data entry form
note = models.TextField() # note for instructions in data entry
obs_num = models.IntegerField() # value for the observation number for
created = models.DateTimeField()
updated = models.DateTimeField(auto_now=True)
def validate_visibility(self, visibility):
""" validates and updates the field group visibility """
if visibility not in self.GROUP_VIS:
# checks to | make sure that
for real_vis_key, value in self.GROUP_VIS.items():
visibility = real_vis_key
break;
return visibility
def save(self, *args, **kwargs):
"""
saves the record with creation date
"""
self.visibility = self.validate_visibility(self.visibility)
| if self.obs_num is None:
self.obs_num = 1
if self.created is None:
self.created = datetime.now()
super(InputFieldGroup, self).save(*args, **kwargs)
class Meta:
db_table = 'crt_fieldgroups'
ordering = ['profile_uuid',
'sort',
'label']
|
uvacw/tcst | lnparse.py | Python | gpl-3.0 | 1,302 | 0.050691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
bestandsnaam="De_Telegraaf2014-03-22_22-08.TXT"
artikel=0
tekst={}
datum={}
s | ection={}
length={}
loaddate={}
language={}
pubtype={}
journal={}
with open(bestandsnaam,"r") as f:
for line in f:
line=line.replace("\r","")
if line=="\n":
continue
matchObj=re.match(r"\s+(\d+) of (\d+) DOCUMENTS",line)
if matchObj:
# print matchObj.group(1), "of", matchObj.group(2)
artikel= int(matchObj.group(1))
#artikel+=1
tek | st[artikel]=""
continue
if line.startswith("SECTION"):
section[artikel]=line.replace("SECTION: ","").rstrip("\n")
elif line.startswith("LENGTH"):
length[artikel]=line.replace("LENGTH: ","").rstrip("\n")
elif line.startswith("LOAD-DATE"):
loaddate[artikel]=line.replace("LOAD-DATE: ","").rstrip("\n")
elif line.startswith("LANGUAGE"):
language[artikel]=line.replace("LANGUAGE: ","").rstrip("\n")
elif line.startswith("PUBLICATION-TYPE"):
pubtype[artikel]=line.replace("PUBLICATION-TYPE: ","").rstrip("\n")
elif line.startswith("JOURNAL-CODE"):
journal[artikel]=line.replace("JOURNAL-CODE: ","").rstrip("\n")
elif line.lstrip().startswith("Copyright "):
pass
elif line.lstrip().startswith("All Rights Reserved"):
pass
else:
tekst[artikel]=tekst[artikel]+line
|
OHRI-BioInfo/pyZPL | web.py | Python | bsd-2-clause | 1,418 | 0.004937 | from flask import *
from pyZPL import *
from printLabel import printLabel
import xml.etree.ElementTree as ET
import os
app = Flask(__name__)
dn = os.path.dirname(os.path.realpath(__file__))+"/"
tree = ET.parse(dn+"pace.xml")
customElements = tree.findall(".//*[@id]")
customItems = []
for element in customElements:
newItem = ZPLCustomItem()
newItem.ID = element.get("id")
newItem.data = element.text
| newItem.type = element.tag
if element.g | et("fixed"):
newItem.fixed = "readonly"
customItems.append(newItem)
@app.route('/')
def root():
return render_template("index.html",items=customItems)
@app.route('/print', methods=['POST'])
def print_():
customItemsModified = []
if request.method == 'POST':
for key,value in request.form.iteritems():
newItem = ZPLCustomItem()
split = key.split('_')
newItem.type = split[len(split)-1]
newItem.ID = str.join("_",split[:len(split)-1])
newItem.data = request.form[newItem.ID+"_string"]
try:
request.form[newItem.ID+"_bool"]
newItem.visible = True
except KeyError:
newItem.visible = False
customItemsModified.append(newItem)
return printLabel(customItemsModified)
else:
return "can has post?"
if __name__ == '__main__':
app.debug = True
app.run()
|
waseem18/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_directives/test_sidebars.py | Python | agpl-3.0 | 1,905 | 0.005249 | #! /usr/bin/env python
# $Id: test_sidebars.py 7062 2011-06-30 22:14:29Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for the "sidebar" directive.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['sidebars'] = [
["""\
.. sidebar:: Outer
.. sidebar:: Nested
Body.
""",
"""\
<document source="test data">
<sidebar>
| <title>
Outer
<system_message level="3" line="3" source="test data" type="ERROR">
<paragraph>
The "si | debar" directive may not be used within a sidebar element.
<literal_block xml:space="preserve">
.. sidebar:: Nested
\n\
Body.
"""],
["""\
.. sidebar:: Margin Notes
:subtitle: with options
:class: margin
:name: note:Options
Body.
""",
"""\
<document source="test data">
<sidebar classes="margin" ids="note-options" names="note:options">
<title>
Margin Notes
<subtitle>
with options
<paragraph>
Body.
"""],
["""\
.. sidebar:: Outer
.. topic:: Topic
.. sidebar:: Inner
text
""",
"""\
<document source="test data">
<sidebar>
<title>
Outer
<topic>
<title>
Topic
<system_message level="3" line="5" source="test data" type="ERROR">
<paragraph>
The "sidebar" directive may not be used within topics or body elements.
<literal_block xml:space="preserve">
.. sidebar:: Inner
\n\
text
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_mhlib.py | Python | epl-1.0 | 11,388 | 0.003864 | """
Tests for the mhlib module
Nick Mathewson
"""
### BUG: This suite doesn't currently test the mime functionality of
### mhlib. It should.
import unittest
from test.test_support import is_jython, run_unittest, TESTFN, TestSkipped
import os, StringIO
import sys
import mhlib
if (sys.platform.startswith("win") or sys.platform=="riscos" or
sys.platform.startswith("atheos") or (is_jython and os._name != 'posix')):
# mhlib.updateline() renames a file to the name of a file that already
# exists. That causes a reasonable OS <wink> to complain in test_sequence
# here, like the "OSError: [Errno 17] File exists" raised on Windows.
# mhlib's listsubfolders() and listallfolders() do something with
# link counts, and that causes test_listfolders() here to get back
# an empty list from its call of listallfolders().
# The other tests here pass on Windows.
raise TestSkipped("skipped on %s -- " % sys.platform +
"too many Unix assumptions")
_mhroot = TESTFN+"_MH"
_mhpath = os.path.join(_mhroot, "MH")
_mhprofile = os.path.join(_mhroot, ".mh_profile")
def normF(f):
return os.path.join(*f.split('/'))
def writeFile(fname, contents):
dir = os.path.split(fname)[0]
if dir and not os.path.exists(dir):
mkdirs(dir)
f = open(fname, 'w')
f.write(contents)
f.close()
def readFile(fname):
f = open(fname)
r = f.read()
f.close()
return r
def writeProfile(dict):
contents = [ "%s: %s\n" % (k, v) for k, v in dict.iteritems() ]
writeFile(_mhprofile, "".join(contents))
def writeContext(folder):
folder = normF(folder)
writeFile(os.path.join(_mhpath, "context"),
"Current-Folder: %s\n" % folder)
def writeCurMessage(folder, cur):
folder = normF(folder)
writeFile(os.path.join(_mhpath, folder, ".mh_sequences"),
"cur: %s\n"%cur)
def writeMessage(folder, n, headers, body):
folder = normF(folder)
headers = "".join([ "%s: %s\n" % (k, v) for k, v in headers.iteritems() ])
contents = "%s\n%s\n" % (headers,body)
mkdirs(os.path.join(_mhpath, folder))
writeFile(os.path.join(_mhpath, folder, str(n)), contents)
def getMH():
return mhlib.MH(os.path.abspath(_mhpath), _mhprofile)
def sortLines(s):
lines = s.split("\n")
lines = [ line.strip() for line in lines if len(line) >= 2 ]
lines.sort()
return lines
# These next 2 functions are copied from test_glob.py.
def mkdirs(fname):
if os.path.exists(fname) or fname == '':
return
base, file = os.path.split(fname)
mkdirs(base)
os.mkdir(fname)
def deltree(fname):
if not os.path.exists(fname):
return
for f in os.listdir(fname):
fullname = os.path.join(fname, f)
if os.path.isdir(fullname):
deltree(fullname)
else:
try:
os.unlink(fullname)
except:
pass
try:
os.rmdir(fname)
except:
pass
class MhlibTests(unittest.TestCase):
def setUp(self):
deltree(_mhroot)
mkdirs(_mhpath)
writeProfile({'Path' : os.path.abspath(_mhpath),
'Editor': 'emacs',
'ignored-attribute': 'camping holiday'})
# Note: These headers aren't really conformant to RFC822, but
# mhlib shouldn't care about that.
# An inbox with a couple of messages.
writeMessage('inbox', 1,
{'From': 'Mrs. Premise',
'To': 'Mrs. Conclusion',
'Date': '18 July 2001'}, "Hullo, Mrs. Conclusion!\n")
writeMessage('inbox', 2,
{'From': 'Mrs. Conclusion',
'To': 'Mrs. Premise',
'Date': '29 July 2001'}, "Hullo, Mrs. Premise!\n")
# A folder with many messages
for i in range(5, 101)+range(101, 201, 2):
writeMessage('wide', i,
{'From': 'nowhere', 'Subject': 'message #%s' % i},
"This is message number %s\n" % i)
# A deeply nested folder
def deep(folder, n):
writeMessage(folder, n,
{'Subject': 'Message %s/%s' % (folder, n) },
"This is message number %s in %s\n" % (n, folder) )
deep('deep/f1', 1)
deep('deep/f1', 2)
deep('deep/f1', 3)
deep('deep/f2', 4)
deep('deep/f2', 6)
deep('deep', 3)
deep('deep/f2/f3', 1)
deep('deep/f2/f3', 2)
def tearDown(self):
deltree(_mhroot)
def test_basic(self):
writeContext('inbox')
writeCurMessage('inbox', 2)
mh = getMH()
eq = self.assertEquals
eq(mh.getprofile('Editor'), 'emacs')
eq(mh.getprofile('not-set'), None)
eq(mh.getpath(), os.path.abspath(_mhpath))
eq(mh.getcontext(), 'inbox')
mh.setcontext('wide')
eq(mh.getcontext(), 'wide')
eq(readFile(os.path.join(_mhpath, 'context')),
"Current-Folder: wide\n")
mh.setcontext('inbox')
inbox = mh.openfolder('inbox')
eq(inbox.getfullname(),
os.path.join(os.path.abspath(_mhpath), 'inbox'))
eq(inbox.getsequencesfilename(),
os.path.join(os.path.abspath(_mhpath), 'inbox', '.mh_sequences'))
eq(inbox.getmessagefilename(1),
os.path.join(os.path.abspath(_mhpath), 'inbox', '1'))
def test_listfolders(self):
mh = getMH()
eq = self.assertEquals
folders = mh.listfolders()
folders.sort()
eq(folders, ['deep', 'inbox', 'wide'])
#link counts from os.stat always return 0 in jython, which causes
#lisallfolders and listsubfolders to return empty lists.
if not sys.platform.startswith("java"):
folders = mh.listallfolders()
folders.sort()
tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
'inbox', 'wide'])
tfolders.sort()
eq(folders, tfolders)
folders = mh.listsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2']))
folders = mh.listallsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
eq(mh.listsubfolders('inbox'), [])
eq(mh.listallsubfolders('inbox'), [])
def test_sequence(self):
mh = getMH()
eq = self.assertEquals
writeCurMessage('wide', 55)
f = mh.openfolder('wide')
all = f.listmessages()
eq(all, range(5, 101)+range(101, 201, 2))
eq(f.getcurrent(), 55)
f.setcurrent(99)
eq(readFile(os.path.join(_mhpath, 'wide', '.mh_sequences')),
'cur: 99\n')
def seqeq(seq, val):
eq(f.parsesequence(seq), val)
seqeq('5-55', range(5, 56))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('10:10', range(10, 20))
seqeq('10:+10', range(10, 20))
seqeq('101:10', range(101, 121, 2))
seqeq('cur', [99])
seqeq('.', [99])
seqeq('prev', [98])
seqeq('next', [100])
seqeq('cur:-3', [97, 98, 99])
seqeq('first-cur', range(5, 100))
seqeq('150-last', range(151, 201, 2))
seqeq('prev-next', [98, 99, 100])
lowprimes = [5, 7, 11, 13, 17, 19, 23, 29]
lowcompos = [x for x in range(5, 31) if not x in lowprimes | ]
f.putsequences({'cur': [5],
'lowprime': lowprimes,
'lowcompos': lowcompos})
seqs = readFile(os.path.join(_mhpath, 'wide', '.mh_sequences'))
seqs = sortLines(seqs)
eq(seqs, ["cur: 5",
"lowcompos: 6 8-10 12 14-16 18 20-22 24-28 30",
"lowprime: 5 7 11 13 | 17 19 23 29"])
seqeq('lowprime', lowprimes)
seqeq('lowprime:1', [5])
seqeq('lowpri |
akun/pycon2015 | tests/test_file_rw.py | Python | mit | 1,886 | 0 | #!/usr/bin/env python
# coding=utf-8
import os
import unittest
from mock import mock_open, patch
from pycon2015.file_rw import read_dollar, write_rmb
class FileTestCase(unittest.TestCase):
test_dir = os.path.dirname(os.path.abspath(__file__))
class FileReadTestCase(FileTestCase):
def test_read(self) | :
file_path = os.path.join(self.test_dir, 'money.txt')
money_list = read_dollar(file_path)
self.assertEqual(money_list, ['1', '2', '3', '4'])
self.assertItemsEqual(money_list, ['4', '1', '2', '3'])
def test_read_big_file(self):
test_list = [str(i) for i in range(12345)]
fake_open = mock_ | open(read_data=os.linesep.join(test_list))
fake_path = '/it/is/a/fake/path'
money_list = []
with patch('__builtin__.open', fake_open):
money_list = read_dollar(fake_path)
self.assertEqual(money_list, test_list)
fake_open.assert_called_once_with(fake_path)
class FileWriteTestCase(FileTestCase):
def setUp(self):
self.file_path = os.path.join(self.test_dir, 'money_w.txt')
def tearDown(self):
if os.path.isfile(self.file_path):
os.unlink(self.file_path)
def test_write(self):
write_rmb(self.file_path, range(10))
money_list = []
with open(self.file_path) as f:
money_list = [i.strip() for i in f.readlines()]
self.assertEqual(money_list, [str(i) for i in range(10)])
def test_write_big_file(self):
max_rmb = 12345
fake_open = mock_open()
fake_path = '/it/is/a/fake/path'
with patch('__builtin__.open', fake_open):
write_rmb(fake_path, range(max_rmb))
fake_open.assert_called_once_with(fake_path, 'w')
fake_open().write.assert_called_once_with(
os.linesep.join([str(i) for i in range(max_rmb)])
)
|
dwfreed/mitmproxy | mitmproxy/proxy/protocol/websocket.py | Python | mit | 7,347 | 0.002314 | import os
import socket
import struct
from OpenSSL import SSL
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.net import tcp
from mitmproxy.net import websockets
from mitmproxy.websocket import WebSocketFlow, WebSocketBinaryMessage, WebSocketTextMessage
class WebSocketLayer(base.Layer):
"""
WebSocket layer to intercept, modify, and forward WebSocket messages.
Only version 13 is supported (as specified in RFC6455).
Only HTTP/1.1-initiated connections are supported.
The client starts by sending an Upgrade-request.
In order to determine the handshake and negotiate the correct protocol
and extensions, the Upgrade-request is forwarded to the server.
The response from the server is then parsed and negotiated settings are extracted.
Finally the handshake is completed by forwarding the server-response to the client.
After that, only WebSocket frames are exchanged.
PING/PONG frames pass through and must be answered by the other endpoint.
CLOSE frames are forwarded before this WebSocketLayer terminates.
This layer is transparent to any negotiated extensions.
This layer is transparent to any negotiated subprotocols.
Only raw frames are forwarded to the other endpoint.
WebSocket messages are stored in a WebSocketFlow.
"""
def __init__(self, ctx, handshake_flow):
super().__init__(ctx)
self.handshake_flow = handshake_flow
self.flow = None # type: WebSocketFlow
self.client_frame_buffer = []
self.server_frame_buffer = []
def _handle_frame(self, frame, source_conn, other_conn, is_server):
if frame.header.opcode & 0x8 == 0:
return self._handle_data_frame(frame, source_conn, other_conn, is_server)
elif frame.header.opcode in (websockets.OPCODE.PING, websockets.OPCODE.PONG):
return self._handle_ping_pong(frame, source_conn, other_conn, is_server)
elif frame.header.opcode == websockets.OPCODE.CLOSE:
return self._handle_close(frame, source_conn, other_conn, is_server)
else:
return self._handle_unknown_frame(frame, source_conn, other_conn, is_server)
def _handle_data_frame(self, frame, source_conn, other_conn, is_server):
fb = self.server_frame_buffer if is_server else self.client_frame_buffer
fb.append(frame)
if frame.header.fin:
payload = b''.join(f.payload for f in fb)
original_chunk_sizes = [len(f.payload) for f in fb]
message_type = fb[0].header.opcode
compressed_message = fb[0].header.rsv1
fb.clear()
if message_type == websockets.OPCODE.TEXT:
t = WebSocketTextMessage
else:
t = WebSocketBinaryMessage
websocket_message = t(self.flow, not is_server, payload)
length = len(websocket_message.content)
self.flow.messages.append(websocket_message)
self.channel.ask("websocket_message", self.flow)
def get_chunk(payload):
if len(payload) == length:
# message has the same length, we can reuse the same sizes
pos = 0
for s in original_chunk_sizes:
yield payload[pos:pos + s]
pos += s
else:
# just re-chunk everything into 10kB frames
chunk_size = 10240
chunks = range(0, len(payload), chunk_size)
for i in chunks:
yield payload[i:i + chunk_size]
frms = [
websockets.Frame(
payload=chunk,
opcode=frame.header.opcode,
mask=(False if is_server else 1),
masking_key=(b'' if is_server else os.urandom(4)))
for chunk in get_chunk(websocket_message.content)
]
if len(frms) > 0:
frms[-1].header.fin = True
else:
frms.append(websockets.Frame(
fin=True,
opcode=websockets.OPCODE.CONTINUE,
mask=(False if is_server else 1),
masking_key=(b'' if is_server else os.urandom(4))))
frms[0].header.opcode = message_type
frms[0].header.rsv1 = compressed_message
for frm in frms:
other_conn.send(bytes(frm))
return True
def _handle_ping_pong(self, frame, source_conn, other_conn, is_server):
# just forward the ping/pong to the other side
other_conn.send(bytes(frame))
return True
def _handle_close(self, frame, source_conn, other_conn, is_server):
self.flow.close_sender = "server" if is_server else "client"
if len(frame.payload) >= 2:
code, = struct.unpack('!H', frame.payload[:2])
self.flow.close_code = code
self.flow.close_message = websockets.CLOSE_REASON.get_name(code, default='unknown status code')
if len(frame.payload) > 2:
self.flow.close_reason = frame.payload[2:]
other_conn.send(bytes(frame))
# initiate close handshake
return False
def _handle_unknown_frame(self, frame, source_conn, other_conn, is_server):
# unknown frame - just forward it
other_conn.send(bytes(frame))
sender = "server" if is_server else "client"
self.log("Unknown WebSocket frame received from {}".format(sender), "info", [repr(frame)])
return True
def __call__(self):
self.flow = WebSocketFlow(self.client_conn, self.server_conn, self.handshake_flow, self)
self.flow.metadata['websocket_handshake'] = self.handshake_flow
self.handshake_flow.metadata['websocket_flow'] = self.flow
self.channel.ask("websocket_start", self.flow)
client = self.client_conn.connection
server = self.server_conn.connection
conns = [client, server]
close_received = False
try:
while n | ot self.channel.should_exit.is_set():
r = tcp.ssl_rea | d_select(conns, 0.1)
for conn in r:
source_conn = self.client_conn if conn == client else self.server_conn
other_conn = self.server_conn if conn == client else self.client_conn
is_server = (conn == self.server_conn.connection)
frame = websockets.Frame.from_file(source_conn.rfile)
cont = self._handle_frame(frame, source_conn, other_conn, is_server)
if not cont:
if close_received:
return
else:
close_received = True
except (socket.error, exceptions.TcpException, SSL.Error) as e:
s = 'server' if is_server else 'client'
self.flow.error = flow.Error("WebSocket connection closed unexpectedly by {}: {}".format(s, repr(e)))
self.channel.tell("websocket_error", self.flow)
finally:
self.channel.tell("websocket_end", self.flow)
|
ESOedX/edx-platform | openedx/core/djangoapps/schedules/tests/factories.py | Python | agpl-3.0 | 1,310 | 0.000763 | """
Factories for schedules tests
"""
from __future__ import absolute_import
import factory
import pytz
from openedx.core.djangoapps.schedules import models
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student.tests.factories import CourseEnrollmentFactory
class ScheduleExperienceFactory(factory.DjangoModelFactory):
class Meta(object):
model = models.ScheduleExperience
experience_type = models.ScheduleExperience.EXPERIENCES.default
class ScheduleFactory(factory.DjangoModelFactory):
class Meta(object):
model = models.Schedule
start = factory.Faker('future_datetime', tzinfo=pytz.UTC | )
upgrade_deadline = factory.Faker('future_datetime', tzinfo=pytz.UTC)
enrollm | ent = factory.SubFactory(CourseEnrollmentFactory)
experience = factory.RelatedFactory(ScheduleExperienceFactory, 'schedule')
class ScheduleConfigFactory(factory.DjangoModelFactory):
class Meta(object):
model = models.ScheduleConfig
site = factory.SubFactory(SiteFactory)
create_schedules = True
enqueue_recurring_nudge = True
deliver_recurring_nudge = True
enqueue_upgrade_reminder = True
deliver_upgrade_reminder = True
enqueue_course_update = True
deliver_course_update = True
hold_back_ratio = 0
|
peerdrive/peerdrive | client/peerdrive/struct.py | Python | gpl-3.0 | 10,142 | 0.033429 | # vim: set fileencoding=utf-8 :
#
# PeerDrive
# Copyright (C) 2011 Jan Klötzke <jan DOT kloetzke AT freenet DOT de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import struct, copy
from . import connector
# returns (result, conflicts)
def merge(base, versions):
baseType = type(base)
if any([not isinstance(o, baseType) for o in versions]):
# the type changed -> unmergable
return (versions[0], True)
if isinstance(base, dict):
return __mergeDict(base, versions)
elif isinstance(base, (list, tuple)):
return __mergeList(base, versions)
eli | f (isinstance(base, basestring)
or isinstance(base, bool)
or isinstance(base, connector.RevLink)
or isins | tance(base, connector.DocLink)
or isinstance(base, float)
or isinstance(base, (int, long))):
changes = [o for o in versions if o != base]
count = len(set(changes))
if count > 1:
# take the latest change
return (changes[0], True)
elif count == 1:
return (changes[0], False)
else:
return (base, False)
else:
raise TypeError("Invalid object: " + repr(o))
def __mergeDict(base, versions):
baseKeys = set(base.keys())
conflict = False
added = { }
removed = set()
# get differences
for ver in versions:
verKeys = set(ver.keys())
# removed keys
removed.update(baseKeys - verKeys)
# added keys
newKeys = verKeys - baseKeys
for key in newKeys:
if key in added:
# already added; the same?
if ver[key] != added[key]:
conflict = True
else:
added[key] = ver[key]
# construct new dict
newDict = {}
for (key, oldValue) in base.items():
if key in removed:
# modify/delete conflict?
remainingValues = [d[key] for d in versions if (key in d)]
if any([other != oldValue for other in remainingValues]):
conflict = True
# yes :( -> take the latest version
if key in versions[0]:
# the latest version still has it.. retain
(newValue, newConflict) = merge(oldValue, remainingValues)
newDict[key] = newValue
else:
# the latest version deleted it.. bye bye
pass
else:
# descend
(newValue, newConflict) = merge(oldValue, [d[key] for d in versions])
conflict = conflict or newConflict
newDict[key] = newValue
for (key, newValue) in added.items():
newDict[key] = newValue
# return new dict
return (newDict, conflict)
def __mergeList(base, versions):
added = []
removed = []
for ver in versions:
# check for removed items
for item in base:
if item not in ver:
if item not in removed:
removed.append(item)
# check for added items
for item in ver:
if item not in base:
if item not in added:
added.append(item)
# apply diff
newList = base[:]
for item in removed:
newList.remove(item)
for item in added:
newList.append(item)
return (newList, False)
###############################################################################
# PeerDrive folder object
###############################################################################
class Folder(object):
UTIs = ["org.peerdrive.folder", "org.peerdrive.store"]
def __init__(self, link = None):
self.__didCache = False
if link:
link.update()
self.__rev = link.rev()
self.__doc = link.doc()
self.__store = link.store()
self.__load()
else:
self.__content = []
self.__rev = None
self.__doc = None
self.__store = None
def __load(self):
if not self.__rev:
raise IOError("Folder not found")
uti = connector.Connector().stat(self.__rev, [self.__store]).type()
if uti not in Folder.UTIs:
raise IOError("Not a folder: "+uti)
with connector.Connector().peek(self.__store, self.__rev) as r:
self.__meta = r.getData('/org.peerdrive.annotation')
content = r.getData('/org.peerdrive.folder')
self.__content = [ (None, l) for l in content ]
def __doCache(self):
if not self.__didCache:
self.__content = [ (readTitle(i['']), i) for (t, i) in
self.__content ]
self.__didCache = True
def create(self, store, name=None):
if self.__rev or self.__doc:
raise IOError("Not new")
self.__store = store
if not name:
name = "New folder"
self.__meta = { "title" : name }
for (descr, item) in self.__content:
item[''].update(self.__store)
content = [ item for (descr, item) in self.__content ]
w = connector.Connector().create(store, "org.peerdrive.folder", "")
try:
w.setData('', {
"org.peerdrive.folder" : content,
"org.peerdrive.annotation" : self.__meta
})
w.setFlags([connector.Stat.FLAG_STICKY])
w.commit()
self.__rev = w.getRev()
self.__doc = w.getDoc()
return w
except:
w.close()
raise
def save(self):
if self.__rev and self.__doc and self.__store:
content = [ item for (descr, item) in self.__content ]
with connector.Connector().update(self.__store, self.__doc, self.__rev) as w:
w.setData('', {
"org.peerdrive.folder" : content,
"org.peerdrive.annotation" : self.__meta
})
self.__rev = w.commit()
else:
raise IOError('Not writable')
def title(self):
if "title" in self.__meta:
return self.__meta["title"]
return "Unnamed folder"
def __index(self, title, fail=True):
i = 0
for (key, item) in self.__content:
if key == title:
return i
else:
i += 1
if fail:
raise IndexError(title)
else:
return None
def __len__(self):
return len(self.__content)
def __getitem__(self, i):
if isinstance(i, basestring):
self.__doCache()
i = self.__index(i)
return self.__content[i][1]['']
def __delitem__(self, select):
if isinstance(i, basestring):
self.__doCache()
i = self.__index(i)
del self.__content[i]
def __contains__(self, name):
self.__doCache()
i = self.__index(name, False)
return i is not None
def append(self, link):
if self.__store:
link.update(self.__store)
self.__content.append( (readTitle(link), { '' : link }) )
def get(self, name):
self.__doCache()
i = self.__index(name, False)
if i is None:
return None
else:
return self.__content[i][1]['']
def items(self):
self.__doCache()
return [ (name, item['']) for (name, item) in self.__content ]
def remove(self, name, link):
self.__doCache()
self.__content.remove((name, {'' : link}))
def getDoc(self):
return self.__doc
def getRev(self):
return self.__rev
# tiny helper function
def readTitle(link, default=None):
rev = link.rev()
if rev:
try:
with connector.Connector().peek(link.store(), rev) as r:
return r.getData("/org.peerdrive.annotation/title")
except IOError:
pass
return default
class FSTab(object):
def __init__(self):
self.__changed = False
self.__store = connector.Connector().enum().sysStore().sid
self.__doc = Folder(connector.DocLink(self.__store, self.__store))["fstab"].doc()
self.__rev = connector.Connector().lookupDoc(self.__doc).rev(self.__store)
with connector.Connector().peek(self.__store, self.__rev) as r:
self.__fstab = r.getData('/org.peerdrive.fstab')
def save(self):
if not self.__changed:
return
with connector.Connector().update(self.__store, self.__doc, self.__rev) as w:
w.setData('/org.peerdrive.fstab', self.__fstab)
w.commit()
self.__rev = w.getRev()
self.__changed = False
def knownLabels(self):
return self.__fstab.keys()
def mount(self, label):
src = self.__fstab[label]["src"]
type = self.__fstab[label].get("type", "file")
options = self.__fstab[label].get("options", "")
credentials = self.__fstab[label].get("credentials", "")
return connector.Connector().mount(src, label, type, options, credentials)
def get(self |
smiller171/ansible | lib/ansible/module_utils/facts.py | Python | gpl-3.0 | 132,404 | 0.004955 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import array
import shlex
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic shou | ld
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
('/etc/slackware-version', 'Slackware'),
('/etc/redhat-releas | e', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
)
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
]
def __init__(self, load_on_init=True):
self.facts = {}
if load_on_init:
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
if module.get_bin_path('getconf'):
rc, out, err = module.run_command([module.get_bin_path('getconf'),
'MACHINE_ARCHITECTURE'])
data = out.split('\n')
self.facts['architecture'] = data[0]
else:
rc, |
bkosawa/admin-recommendation | admin_recommendation/urls.py | Python | apache-2.0 | 879 | 0 | """recomendation URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from othe | r_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^nested_admin/', include('nested_admin.urls')),
url(r'^api/', inclu | de('crawler.urls'))
]
|
pkimber/invoice | invoice/tests/test_invoice_print.py | Python | apache-2.0 | 1,817 | 0 | # -*- encoding: utf-8 -*-
from datetime import date
from dj | ango.test import TestCase
from contact.tests.factories import ContactFactory
from crm.tests.factories import TicketFactory
from finance.tests.factories import VatSettingsFactory
from invoice.models import InvoiceError
from invoice.service import (
InvoiceCreate,
InvoicePrint,
)
from invoice.tests.factories import (
InvoiceContactFactory,
InvoiceFactory | ,
InvoiceLineFactory,
InvoiceSettingsFactory,
TimeRecordFactory,
)
from login.tests.factories import UserFactory
class TestInvoicePrint(TestCase):
def test_invoice_create_pdf(self):
InvoiceSettingsFactory()
VatSettingsFactory()
contact = ContactFactory()
InvoiceContactFactory(contact=contact)
ticket = TicketFactory(contact=contact)
TimeRecordFactory(ticket=ticket, date_started=date(2013, 12, 1))
invoice = InvoiceCreate().create(
UserFactory(),
contact,
date(2013, 12, 31)
)
InvoicePrint().create_pdf(invoice, None)
def test_invoice_create_pdf_no_lines(self):
"""Cannot create a PDF if the invoice has no lines"""
invoice = InvoiceFactory()
self.assertRaises(
InvoiceError,
InvoicePrint().create_pdf,
invoice,
None
)
def test_invoice_create_pdf_not_draft(self):
"""Cannot create a PDF if the invoice has already been printed"""
InvoiceSettingsFactory()
VatSettingsFactory()
invoice = InvoiceFactory()
InvoiceLineFactory(invoice=invoice)
InvoicePrint().create_pdf(invoice, None)
self.assertRaises(
InvoiceError,
InvoicePrint().create_pdf,
invoice,
None
)
|
mytliulei/DCNRobotInstallPackages | windows/win32/paramiko-1.14.0/tests/test_auth.py | Python | apache-2.0 | 8,501 | 0.001412 | # Copyright (C) 2008 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for authenticating over a Transport.
"""
import sys
import threading
import unittest
from paramiko import Transport, ServerInterface, RSAKey, DSSKey, \
BadAuthenticationType, InteractiveQuery, \
AuthenticationException
from paramiko import AUTH_FAILED, AUTH_PARTIALLY_SUCCESSFUL, AUTH_SUCCESSFUL
from paramiko.py3compat import u
from tests.loop import LoopSocket
from tests.util import test_path
_pwd = u('\u2022')
class NullServer (ServerInterface):
paranoid_did_password = False
paranoid_did_public_key = False
paranoid_key = DSSKey.from_private_key_file(test_path('test_dss.key'))
def get_allowed_auths(self, username):
if username == 'slowdive':
return 'publickey,password'
if username == 'paranoid':
if not self.paranoid_did_password and not self.paranoid_did_public_key:
return 'publickey,password'
elif self.paranoid_did_password:
return 'publickey'
else:
return 'password'
if username == 'commie':
return 'keyboard-interactive'
if username == 'utf8':
return 'password'
if username == 'non-utf8':
return 'password'
return 'publickey'
def check_auth_password(self, username, password):
if (username == 'slowdive') and (password == 'pygmalion'):
return AUTH_SUCCESSFUL
if (username == 'paranoid') and (password == 'paranoid'):
# 2-part auth (even openssh doesn't support this)
self.paranoid_did_password = True
if self.paranoid_did_public_key:
return AUTH_SUCCESSFUL
return AUTH_PARTIALLY_SUCCESSFUL
if (username == 'utf8') and (password == _pwd):
return AUTH_SUCCESSFUL
if (username == 'non-utf8') and (password == '\xff'):
return AUTH_SUCCESSFUL
if username == 'bad-server':
raise Exception("Ack!")
return AUTH_FAILED
def check_auth_publickey(self, username, key):
if (username == 'paranoid') and (key == self.paranoid_key):
# 2-part auth
self.paranoid_did_public_key = True
if self.paranoid_did_password:
return AUTH_SUCCESSFUL
return AUTH_PARTIALLY_SUCCESSFUL
return AUTH_FAILED
def check_auth_interactive(self, username, submethods):
if username == 'commie':
self.username = username
return InteractiveQuery('password', 'Please enter a password.', ('Password', False))
return AUTH_FAILED
def check_auth_interactive_response(self, responses):
if self.username == 'commie':
if (len(responses) == 1) and (responses[0] == 'cat'):
return AUTH_SUCCESSFUL
return AUTH_FAILED
class AuthTest (unittest.TestCase):
def setUp(self):
self.socks = LoopSocket()
self.sockc = LoopSocket()
self.sockc.link(self.socks)
self.tc = Transport(self.sockc)
self.ts = Transport(self.socks)
def tearDown(self):
self.tc.close()
self.ts.close()
self.socks.close()
self.sockc.close()
def start_server(self):
host_key = RSAKey.from_private_key_file(test_path('test_rsa.key'))
self.public_host_key = RSAKey(data=host_key.asbytes())
self.ts.add_server_key(host_key)
self.event = threading.Event()
self.server = NullServer()
self.assertTrue(not self.event.isSet())
self.ts.start_server(self.event, self.server)
def verify_finished(self):
self.event.wait(1.0)
self.assertTrue(self.event.isSet())
self.assertTrue(self.ts.is_active())
def test_1_bad_auth_type(self):
"""
verify that we get the right exception when an unsupported auth
type is requested.
"""
self.start_server()
try:
self.tc.connect(hostkey=self.public_host_key,
username='unknown', password='error')
self.assertTrue(False)
except:
etype, evalue, etb = sys.exc_info()
self.assertEqual(BadAuthenticationType, etype)
self.assertEqual(['publickey'], evalue.allowed_types)
def test_2_bad_password(self):
"""
verify that a bad password gets the right exception, and that a retry
with the right p | assword works.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
try:
self.tc.auth_password(username='slowdive', password='error')
self.assertTrue(False)
except:
etype, evalue, etb = sys.exc_info()
self.assertTrue(issubclass(etype, AuthenticationException))
self.tc.auth_password(username='slowdive', password='pygmalion')
self.verify_finished()
def t | est_3_multipart_auth(self):
"""
verify that multipart auth works.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
remain = self.tc.auth_password(username='paranoid', password='paranoid')
self.assertEqual(['publickey'], remain)
key = DSSKey.from_private_key_file(test_path('test_dss.key'))
remain = self.tc.auth_publickey(username='paranoid', key=key)
self.assertEqual([], remain)
self.verify_finished()
def test_4_interactive_auth(self):
"""
verify keyboard-interactive auth works.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
def handler(title, instructions, prompts):
self.got_title = title
self.got_instructions = instructions
self.got_prompts = prompts
return ['cat']
remain = self.tc.auth_interactive('commie', handler)
self.assertEqual(self.got_title, 'password')
self.assertEqual(self.got_prompts, [('Password', False)])
self.assertEqual([], remain)
self.verify_finished()
def test_5_interactive_auth_fallback(self):
"""
verify that a password auth attempt will fallback to "interactive"
if password auth isn't supported but interactive is.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
remain = self.tc.auth_password('commie', 'cat')
self.assertEqual([], remain)
self.verify_finished()
def test_6_auth_utf8(self):
"""
verify that utf-8 encoding happens in authentication.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
remain = self.tc.auth_password('utf8', _pwd)
self.assertEqual([], remain)
self.verify_finished()
def test_7_auth_non_utf8(self):
"""
verify that non-utf-8 encoded passwords can be used for broken
servers.
"""
self.start_server()
self.tc.connect(hostkey=self.public_host_key)
remain = self.tc.auth_password('non-utf8', '\xff')
self.assertEqual([], remain)
self.verify_finished()
def test_8_auth_gets_disconnected(self):
"""
verify that we catch a server disconnecting during auth, and report
it as an au |
icoxfog417/kanaria | tests/core/test_service_db.py | Python | apache-2.0 | 987 | 0.001013 | import unittest
from datetime import datetime
from kanaria.core.service.db import MongoDBService
class ModelExample(object):
def __init__(self, title="", description="", date=None):
self.title = title
self.description = description
self.date = date if date else datetime.now()
self._private = False
def method(self):
pass
class TestDBService(unittest.TestCase):
def test_serialize(self):
db = MongoDBService()
m = ModelExample("test", "test_serialize")
name = db.get_collection_name( | m)
dic = db.object_to_dict(m)
self.assertTrue("model_example", name)
self.assertTrue(m.title, dic["title"])
self.asse | rtTrue(m.date, dic["date"])
self.assertFalse("_private" in dic)
self.assertFalse("method" in dic)
def test_get_collection(self):
db = MongoDBService()
collection = db.get_collection(ModelExample)
self.assertTrue(collection)
|
katrid/keops | keops/report_urls.py | Python | bsd-3-clause | 457 | 0.002188 | from django.conf import settings
from django.conf.urls import url
import django.views.static
from keops.api impor | t site
import keops.views.reports
urlpatterns = [
url(r'^web/reports/', keops.views.reports.dashboard),
url(r' | ^web/reports/view/', keops.views.reports.report),
url(r'^api/reports/choices/', keops.views.reports.choices),
url(r'^reports/temp/(?P<path>.*)$', django.views.static.serve, {'document_root': settings.REPORT_ROOT})
]
|
hankcs/HanLP | hanlp/common/transform.py | Python | apache-2.0 | 15,235 | 0.000788 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-03 14:44
import logging
import os
from abc import ABC, abstractmethod
from typing import Tuple, Union, List
from hanlp_common.constant import EOS, PAD
from hanlp_common.structure import SerializableDict
from hanlp_common.configurable import Config | urable
from hanlp.common.vocab import Vocab
from hanlp.utils.io_util import get_resource
from hanlp_common.io import load_json
from hanlp_common.reflection import classpath_of, str_to_type
from hanlp.utils.string_util import ispunct
class ToIndex(ABC):
def __init__(self, vocab: Vocab = None) -> None:
super().__init__()
if vocab is None:
vocab = Vocab()
self.vocab = vocab
@abstractmethod
def __call__(self, sample):
pass
de | f save_vocab(self, save_dir, filename='vocab.json'):
vocab = SerializableDict()
vocab.update(self.vocab.to_dict())
vocab.save_json(os.path.join(save_dir, filename))
def load_vocab(self, save_dir, filename='vocab.json'):
save_dir = get_resource(save_dir)
vocab = SerializableDict()
vocab.load_json(os.path.join(save_dir, filename))
self.vocab.copy_from(vocab)
class FieldToIndex(ToIndex):
def __init__(self, src, vocab: Vocab, dst=None) -> None:
super().__init__(vocab)
self.src = src
if not dst:
dst = f'{src}_id'
self.dst = dst
def __call__(self, sample: dict):
sample[self.dst] = self.vocab(sample[self.src])
return sample
def save_vocab(self, save_dir, filename=None):
if not filename:
filename = f'{self.dst}_vocab.json'
super().save_vocab(save_dir, filename)
def load_vocab(self, save_dir, filename=None):
if not filename:
filename = f'{self.dst}_vocab.json'
super().load_vocab(save_dir, filename)
class VocabList(list):
def __init__(self, *fields) -> None:
super().__init__()
for each in fields:
self.append(FieldToIndex(each))
def append(self, item: Union[str, Tuple[str, Vocab], Tuple[str, str, Vocab], FieldToIndex]) -> None:
if isinstance(item, str):
item = FieldToIndex(item)
elif isinstance(item, (list, tuple)):
if len(item) == 2:
item = FieldToIndex(src=item[0], vocab=item[1])
elif len(item) == 3:
item = FieldToIndex(src=item[0], dst=item[1], vocab=item[2])
else:
raise ValueError(f'Unsupported argument length: {item}')
elif isinstance(item, FieldToIndex):
pass
else:
raise ValueError(f'Unsupported argument type: {item}')
super(self).append(item)
def save_vocab(self, save_dir):
for each in self:
each.save_vocab(save_dir, None)
def load_vocab(self, save_dir):
for each in self:
each.load_vocab(save_dir, None)
class VocabDict(SerializableDict):
def __init__(self, *args, **kwargs) -> None:
"""A dict holding :class:`hanlp.common.vocab.Vocab` instances. When used as a transform, it transforms the field
corresponding to each :class:`hanlp.common.vocab.Vocab` into indices.
Args:
*args: A list of vocab names.
**kwargs: Names and corresponding :class:`hanlp.common.vocab.Vocab` instances.
"""
vocabs = dict(kwargs)
for each in args:
vocabs[each] = Vocab()
super().__init__(vocabs)
def save_vocabs(self, save_dir, filename='vocabs.json'):
"""Save vocabularies to a directory.
Args:
save_dir: The directory to save vocabularies.
filename: The name for vocabularies.
"""
vocabs = SerializableDict()
for key, value in self.items():
if isinstance(value, Vocab):
vocabs[key] = value.to_dict()
vocabs.save_json(os.path.join(save_dir, filename))
def load_vocabs(self, save_dir, filename='vocabs.json', vocab_cls=Vocab):
"""Load vocabularies from a directory.
Args:
save_dir: The directory to load vocabularies.
filename: The name for vocabularies.
"""
save_dir = get_resource(save_dir)
vocabs = SerializableDict()
vocabs.load_json(os.path.join(save_dir, filename))
self._load_vocabs(self, vocabs, vocab_cls)
@staticmethod
def _load_vocabs(vd, vocabs: dict, vocab_cls=Vocab):
"""
Args:
vd:
vocabs:
vocab_cls: Default class for the new vocab
"""
for key, value in vocabs.items():
if 'idx_to_token' in value:
cls = value.get('type', None)
if cls:
cls = str_to_type(cls)
else:
cls = vocab_cls
vocab = cls()
vocab.copy_from(value)
vd[key] = vocab
else: # nested Vocab
# noinspection PyTypeChecker
vd[key] = nested = VocabDict()
VocabDict._load_vocabs(nested, value, vocab_cls)
def lock(self):
"""
Lock each vocabs.
"""
for key, value in self.items():
if isinstance(value, Vocab):
value.lock()
@property
def mutable(self):
status = [v.mutable for v in self.values() if isinstance(v, Vocab)]
return len(status) == 0 or any(status)
def __call__(self, sample: dict):
for key, value in self.items():
if isinstance(value, Vocab):
field = sample.get(key, None)
if field is not None:
sample[f'{key}_id'] = value(field)
return sample
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(key)
return self.__getitem__(key)
def __setattr__(self, key, value):
return self.__setitem__(key, value)
def __getitem__(self, k: str) -> Vocab:
return super().__getitem__(k)
def __setitem__(self, k: str, v: Vocab) -> None:
super().__setitem__(k, v)
def summary(self, logger: logging.Logger = None):
"""Log a summary of vocabs using a given logger.
Args:
logger: The logger to use.
"""
for key, value in self.items():
if isinstance(value, Vocab):
report = value.summary(verbose=False)
if logger:
logger.info(f'{key}{report}')
else:
print(f'{key}{report}')
def put(self, **kwargs):
"""Put names and corresponding :class:`hanlp.common.vocab.Vocab` instances into self.
Args:
**kwargs: Names and corresponding :class:`hanlp.common.vocab.Vocab` instances.
"""
for k, v in kwargs.items():
self[k] = v
class NamedTransform(ABC):
def __init__(self, src: str, dst: str = None) -> None:
if dst is None:
dst = src
self.dst = dst
self.src = src
@abstractmethod
def __call__(self, sample: dict) -> dict:
return sample
class ConfigurableTransform(Configurable, ABC):
@property
def config(self):
return dict([('classpath', classpath_of(self))] +
[(k, v) for k, v in self.__dict__.items() if not k.startswith('_')])
@classmethod
def from_config(cls, config: dict):
"""
Args:
config:
kwargs:
config: dict:
Returns:
"""
cls = config.get('classpath', None)
assert cls, f'{config} doesn\'t contain classpath field'
cls = str_to_type(cls)
config = dict(config)
config.pop('classpath')
return cls(**config)
class ConfigurableNamedTransform(NamedTransform, ConfigurableTransform, ABC):
pass
class EmbeddingNamedTransform(ConfigurableNamedTransform, ABC):
def __init__(self, output_dim: int, src: str, dst: str) -> None:
super().__init__(src, dst)
self. |
dgellis90/nipype | nipype/interfaces/freesurfer/base.py | Python | bsd-3-clause | 6,473 | 0.000154 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Currently these tools are supported:
* Dicom2Nifti: using mri_convert
* Resample: using mri_convert
Examples
--------
See the docstrings for the individual classes for 'working' examples.
"""
__docformat__ = 'restructuredtext'
from builtins import object
import os
from ..base import (CommandLine, Directory,
CommandLineInputSpec, isdefined,
traits, TraitedSpec, File)
from ...utils.filemanip import fname_presuffix
class Info(object):
""" Freesurfer subject directory and version information.
Examples
--------
>>> from nipype.interfaces.freesurfer import Info
>>> Info.version() # doctest: +SKIP
>>> Info.subjectsdir() # doctest: +SKIP
"""
@staticmethod
def version():
"""Check for freesurfer version on system
Find which freesurfer is being used....and get version from
/path/to/freesurfer/build-stamp.txt
Returns
-------
version : string
version number as string
or None if freesurfer version not found
"""
fs_home = os.getenv('FREESURFER_HOME')
if fs_home is None:
return None
versionfile = os.path.join(fs_home, 'build-stamp.txt')
if not os.path.exists(versionfile):
return None
fid = open(versionfile, 'rt')
version = fid.readline()
fid.close()
return version
@classmethod
def subjectsdir(cls):
"""Check the global SUBJECTS_DIR
Parameters
----------
subjects_dir : string
The system defined subjects directory
Returns
-------
subject_dir : string
Represents the current environment setting of SUBJECTS_DIR
"""
if cls.version():
return os.environ['SUBJECTS_DIR']
return None
class FSTraitedSpec(CommandLineInputSpec):
subjects_dir = Directory(exists=True, desc='subjects directory')
class FSCommand(CommandLine):
"""General support for FreeSurfer commands.
Every FS command accepts 'subjects_dir' input.
"""
input_spec = FSTraitedSpec
_subjects_dir = None
def __init__(self, **inputs):
super(FSCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir')
if not self._subjects_dir:
self._subjects_dir = Info.subjectsdir()
if not isdefined(self.inputs.subjects_dir) and self._subjects_dir:
self.inputs.subjects_dir = self._subjects_dir
self._subjects_dir_update()
def _subjects_dir_update(self):
if self.inputs.subjects_dir:
self.inputs.environ.update({'SUBJECTS_DIR':
self.inputs.subjects_dir})
@classmethod
def set_default_subjects_dir(cls, subjects_dir):
cls._subjects_dir = subjects_dir
@property
def version(self):
return Info.version()
def run(self, **inputs):
if 'subjects_dir' in inputs:
self.inputs.subjects_dir = inputs['subjects_dir']
self._subjects_dir_update()
return super(FSCommand, self).run(**inputs)
def _gen_fname(self, basename, fname=None, cwd=None, suffix='_fs',
use_ext=True):
'''Define a generic mapping for a single outfile
The filename is potentially auto | generated by suffixing inputs.infile
| Parameters
----------
basename : string (required)
filename to base the new filename on
fname : string
if not None, just use this fname
cwd : string
prefix paths with cwd, otherwise os.getcwd()
suffix : string
default suffix
'''
if basename == '':
msg = 'Unable to generate filename for command %s. ' % self.cmd
msg += 'basename is not set!'
raise ValueError(msg)
if cwd is None:
cwd = os.getcwd()
fname = fname_presuffix(basename, suffix=suffix,
use_ext=use_ext, newpath=cwd)
return fname
@property
def version(self):
ver = Info.version()
if ver:
if 'dev' in ver:
return ver.rstrip().split('-')[-1] + '.dev'
else:
return ver.rstrip().split('-v')[-1]
class FSScriptCommand(FSCommand):
""" Support for Freesurfer script commands with log inputs.terminal_output """
_terminal_output = 'file'
_always_run = False
def __init__(self, **inputs):
super(FSScriptCommand, self).__init__(**inputs)
self.set_default_terminal_output(self._terminal_output)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['log_file'] = os.path.abspath('stdout.nipype')
return outputs
class FSScriptOutputSpec(TraitedSpec):
log_file = File('stdout.nipype', usedefault=True,
exists=True, desc="The output log")
class FSTraitedSpecOpenMP(FSTraitedSpec):
num_threads = traits.Int(desc='allows for specifying more threads')
class FSCommandOpenMP(FSCommand):
"""Support for FS commands that utilize OpenMP
Sets the environment variable 'OMP_NUM_THREADS' to the number
of threads specified by the input num_threads.
"""
input_spec = FSTraitedSpecOpenMP
_num_threads = None
def __init__(self, **inputs):
super(FSCommandOpenMP, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not self._num_threads:
self._num_threads = os.environ.get('OMP_NUM_THREADS', None)
if not isdefined(self.inputs.num_threads) and self._num_threads:
self.inputs.num_threads = int(self._num_threads)
self._num_threads_update()
def _num_threads_update(self):
if self.inputs.num_threads:
self.inputs.environ.update(
{'OMP_NUM_THREADS': str(self.inputs.num_threads)})
def run(self, **inputs):
if 'num_threads' in inputs:
self.inputs.num_threads = inputs['num_threads']
self._num_threads_update()
return super(FSCommandOpenMP, self).run(**inputs)
|
deathowl/huey | huey/consumer.py | Python | mit | 11,130 | 0.00027 | import datetime
import logging
import os
import signal
import threading
import time
from multiprocessing import Event as ProcessEvent
from multiprocessing import Process
try:
import gevent
from gevent import Greenlet
from gevent.event import Event as GreenEvent
except ImportError:
Greenlet = GreenEvent = None
from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey. | registry import registry
class BaseProcess(object):
def __init__(self, huey, utc):
self.huey = huey
self.utc = utc
def get_now(self):
| if self.utc:
return datetime.datetime.utcnow()
return datetime.datetime.now()
def sleep_for_interval(self, start_ts, nseconds):
delta = time.time() - start_ts
if delta < nseconds:
time.sleep(nseconds - (time.time() - start_ts))
def enqueue(self, task):
try:
self.huey.enqueue(task)
except QueueWriteException:
self._logger.error('Error enqueueing task: %s' % task)
else:
self.huey.emit_task('enqueued', task)
def loop(self, now=None):
raise NotImplementedError
class Worker(BaseProcess):
def __init__(self, huey, default_delay, max_delay, backoff, utc):
self.delay = self.default_delay = default_delay
self.max_delay = max_delay
self.backoff = backoff
self._logger = logging.getLogger('huey.consumer.Worker')
super(Worker, self).__init__(huey, utc)
def loop(self, now=None):
self._logger.debug('Checking for message')
task = None
exc_raised = True
try:
task = self.huey.dequeue()
except QueueReadException as exc:
self._logger.exception('Error reading from queue')
except QueueException:
self._logger.exception('Queue exception')
except KeyboardInterrupt:
raise
except:
self._logger.exception('Unknown exception')
else:
exc_raised = False
if task:
self.delay = self.default_delay
self.handle_task(task, now or self.get_now())
elif exc_raised or not self.huey.blocking:
self.sleep()
def sleep(self):
if self.delay > self.max_delay:
self.delay = self.max_delay
self._logger.debug('No messages, sleeping for: %s' % self.delay)
time.sleep(self.delay)
self.delay *= self.backoff
def handle_task(self, task, ts):
if not self.huey.ready_to_run(task, ts):
self._logger.info('Adding %s to schedule' % task)
self.add_schedule(task)
elif not self.is_revoked(task, ts):
self.process_task(task, ts)
else:
self._logger.debug('Task %s was revoked, not running' % task)
def process_task(self, task, ts):
self._logger.info('Executing %s' % task)
self.huey.emit_task('started', task)
try:
self.huey.execute(task)
except DataStorePutException:
self._logger.exception('Error storing result')
except:
self._logger.exception('Unhandled exception in worker thread')
self.huey.emit_task('error', task, error=True)
if task.retries:
self.huey.emit_task('retrying', task)
self.requeue_task(task, self.get_now())
else:
self.huey.emit_task('finished', task)
def requeue_task(self, task, ts):
task.retries -= 1
self._logger.info('Re-enqueueing task %s, %s tries left' %
(task.task_id, task.retries))
if task.retry_delay:
delay = datetime.timedelta(seconds=task.retry_delay)
task.execute_time = ts + delay
self._logger.debug('Execute %s at: %s' % (task, task.execute_time))
self.add_schedule(task)
else:
self.enqueue(task)
def add_schedule(self, task):
try:
self.huey.add_schedule(task)
except ScheduleAddException:
self._logger.error('Error adding task to schedule: %s' % task)
else:
self.huey.emit_task('scheduled', task)
def is_revoked(self, task, ts):
try:
if self.huey.is_revoked(task, ts, peek=False):
self.huey.emit_task('revoked', task)
return True
return False
except DataStoreGetException:
self._logger.error('Error checking if task is revoked: %s' % task)
return True
class Scheduler(BaseProcess):
def __init__(self, huey, interval, utc, periodic):
super(Scheduler, self).__init__(huey, utc)
self.interval = min(interval, 60)
self.periodic = periodic
if periodic:
# Determine the periodic task interval.
self._q, self._r = divmod(60, self.interval)
if not self._r:
self._q -= 1
self._counter = 0
self._logger = logging.getLogger('huey.consumer.Scheduler')
def loop(self, now=None):
now = now or self.get_now()
start = time.time()
for task in self.huey.read_schedule(now):
self._logger.info('Scheduling %s for execution' % task)
self.enqueue(task)
should_sleep = True
if self.periodic:
if self._counter == self._q:
if self._r:
self.sleep_for_interval(start, self._r)
self._logger.debug('Checking periodic tasks')
self._counter = 0
for task in self.huey.read_periodic(now):
self._logger.info('Scheduling periodic task %s.' % task)
self.enqueue(task)
self.sleep_for_interval(start, self.interval - self._r)
should_sleep = False
else:
self._counter += 1
if should_sleep:
self.sleep_for_interval(start, self.interval)
class Environment(object):
def get_stop_flag(self):
raise NotImplementedError
def create_process(self, runnable, name):
raise NotImplementedError
class ThreadEnvironment(Environment):
def get_stop_flag(self):
return threading.Event()
def create_process(self, runnable, name):
t = threading.Thread(target=runnable, name=name)
t.daemon = True
return t
class GreenletEnvironment(Environment):
def get_stop_flag(self):
return GreenEvent()
def create_process(self, runnable, name):
def run_wrapper():
gevent.sleep()
runnable()
gevent.sleep()
return Greenlet(run=run_wrapper)
class ProcessEnvironment(Environment):
def get_stop_flag(self):
return ProcessEvent()
def create_process(self, runnable, name):
p = Process(target=runnable, name=name)
p.daemon = True
return p
worker_to_environment = {
'thread': ThreadEnvironment,
'greenlet': GreenletEnvironment,
'gevent': GreenletEnvironment, # Same as greenlet.
'process': ProcessEnvironment,
}
class Consumer(object):
def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
worker_type='thread'):
self._logger = logging.getLogger('huey.consumer')
self.huey = huey
self.workers = workers
self.periodic = periodic
self.default_delay = initial_delay
self.backoff = backoff
self.max_delay = max_delay
self.utc = utc
self.scheduler_interval = max(min(scheduler_interval, 60), 1)
self.worker_type = worker_type
if worker_type not in worker_to_environment:
raise ValueError('worker_type must be one of %s.' %
|
davide-ceretti/DEPRECATED-googleappengine-djangae-skeleton | application/application/urls.py | Python | mit | 309 | 0 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from application.crud import views
urlpatterns = patterns(
| '',
url(r'^admin/', include(admin.site.urls)),
url(r'^_ah/', include('djangae.urls')),
url(r'^$', views.ItemCr | eateView.as_view(), name='index'),
)
|
antong/ldaptor | ldaptor/protocols/ldap/proxy.py | Python | lgpl-2.1 | 3,381 | 0.001775 | """LDAP protocol proxy server"""
from twisted.internet import reactor, defer
from ldaptor.protocols.ldap import ldapserver, ldapconnector, ldapclient
from ldaptor.protocols import pureldap
class Proxy(ldapserver.BaseLDAPServer):
protocol = ldapclient.LDAPClient
client = None
waitingConnect = []
unbound = False
def __init__(self, config):
"""
Initialize the object.
@param config: The configuration.
@type config: ldaptor.interfaces.ILDAPConfig
"""
ldapserver.BaseLDAPServer.__init__(self)
self.config = config
def _whenConnected(self, fn, *a, **kw):
| if self.client is None:
d = defer.Deferred()
self.waitingConnect.append((d, fn, a, kw))
return d
else:
return defer.maybeDeferred(fn, *a, **kw)
def _cbConnectionMade(self, proto):
self.client = proto
while self.waitingConnect:
d, fn, a, kw = self.waitingConnect.pop(0)
d2 = defer.maybeDeferred(fn, *a, **kw)
d2.chainDeferred( | d)
def _clientQueue(self, request, controls, reply):
# TODO controls
if request.needs_answer:
d = self.client.send_multiResponse(request, self._gotResponse, reply)
# TODO handle d errbacks
else:
self.client.send_noResponse(request)
def _gotResponse(self, response, reply):
reply(response)
# TODO this is ugly
return isinstance(response, (
pureldap.LDAPSearchResultDone,
pureldap.LDAPBindResponse,
))
def _failConnection(self, reason):
#TODO self.loseConnection()
return reason # TODO
def connectionMade(self):
clientCreator = ldapconnector.LDAPClientCreator(
reactor, self.protocol)
d = clientCreator.connect(
dn='',
overrides=self.config.getServiceLocationOverrides())
d.addCallback(self._cbConnectionMade)
d.addErrback(self._failConnection)
ldapserver.BaseLDAPServer.connectionMade(self)
def connectionLost(self, reason):
assert self.client is not None
if self.client.connected:
if not self.unbound:
self.client.unbind()
self.unbound = True
else:
self.client.transport.loseConnection()
self.client = None
ldapserver.BaseLDAPServer.connectionLost(self, reason)
def _handleUnknown(self, request, controls, reply):
self._whenConnected(self._clientQueue, request, controls, reply)
return None
def handleUnknown(self, request, controls, reply):
d = defer.succeed(request)
d.addCallback(self._handleUnknown, controls, reply)
return d
def handle_LDAPUnbindRequest(self, request, controls, reply):
self.unbound = True
self.handleUnknown(request, controls, reply)
if __name__ == '__main__':
"""
Demonstration LDAP proxy; passes all requests to localhost:389.
"""
from twisted.internet import protocol
from twisted.python import log
import sys
log.startLogging(sys.stderr)
factory = protocol.ServerFactory()
factory.protocol = lambda : Proxy(overrides={
'': ('localhost', 389),
})
reactor.listenTCP(10389, factory)
reactor.run()
|
greyshell/Pen-Test | web/sqli/models/__init__.py | Python | mit | 120 | 0 | #!/usr/bin/env python3
# author: greyshell
from .tblpost01 impor | t *
from .tblpost02 | import *
from .tblpost03 import *
|
probcomp/cgpm | src/cgpm.py | Python | apache-2.0 | 7,812 | 0.00064 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CGpm(object):
"""Interface for composable generative population models.
Composable generative population models provide a computational abstraction
for multivariate probability densities and stochastic samplers.
"""
def __init__(self, outputs, inputs, schema, rng):
"""Initialize the CGpm.
Parameters
----------
outputs : list<int>
List of endogenous variables whose joint distribution is modeled by
the CGpm. The CGpm is required to simulate and evaluate the log
density of an arbitrary susbet of output variables, by marginalizing
and/or conditioning on another (disjoint) subset of output
variables.
inputs : list<int>, optional
List of exogenous variables unmodeled by the CGpm which are needed
on a per-row basis. A full realization of all inputs (if any)
is required for each simulate and logpdf query.
schema : **kwargs
An arbitrary data structure used by the CGpm to initialize itself.
Often contains information about hyperparameters, parameters,
sufficient statistics, configuration settings, or metadata about
the input variables.
rng : numpy.random.RandomState
Source of entropy.
"""
raise NotImplementedError
def incorporate(self, rowid, observation, inputs=None):
"""Record an observation for `rowid` into the dataset.
rowid : int
A unique integer identifying the member.
observation : dict{int:value}
The observed values. The keys of `observation` must be a subset of the
`output` variables, and `value` must be type-matched based on
the statistical data type of that variable. Missing values may
be either omitted, or specified as float(nan).
inputs : dict{int:value}, optional
Values of all required `input` variables for the `rowid`.
"""
raise NotImplementedError
def unincorporate(self, rowid):
"""Remove all incorporated observations of `rowid` from the dataset."""
raise NotImplementedError
def logpdf(self, rowid, targets, constraints=None, inputs=None):
"""Return the density of `targets` given `constraints` and `inputs`.
Pr[targets | constraints; inputs]
rowid : int, or None to indicate a hypothetical row
Specifies the identity of the population member against which to
evaluate the log density.
targets : dict{int:value}
The keys of `targets` must be a subset of the `output` variables.
If `rowid` corresponds to an existing member, it is an error for
`targets` to contain any output variable for that `rowid` which has
already been incorporated.
constraints : dict{int:value}, optional
The keys of `constraints` must be a subset of the `output`
variables, and disjoint from the keys of `targets`. These
constraints serve as probabilistic conditions on the multivariate
output distribution. If `rowid` corresponds to an existing member,
it is an error for `constraints` to contain any output variable for
that `rowid` which has already been incorporated.
inputs : dict{int:value}, optional
The keys of `inputs` must contain all the cgpm's `input` variables,
if any. These values comprise a full realization of all exogenous
variables required by the cgpm. If `rowid` corresponds to an
existing member, then `inputs` is expected to be None.
"""
raise NotImplementedError
def simulate(self, rowid, query, constraints=None, inputs=None, N=None):
"""Return N iid samples of `targets` given `constraints` and `inputs`.
(X_1, X_2, ... X_N) ~iid Pr[targets | constraints; inputs]
rowid : int, or None to indicate a hypothetical row
Specifies the identity of the population member whose posterior
distribution over unobserved outputs to simulate from.
query : list<int>
List of `output` variables to simulate. If `rowid` corresponds to an
existing member, it is an error for `targets` to contain any output
variable for that `rowid` which has already been incorporated.
constraints : dict{int:value}, optional
The keys of `constraints` must be a subset of the `output`
variables, and disjoint from the keys of `targets`. These
constraints serve as probabilistic conditions on the multivariate
output distribution. If `rowid` corresponds to an existing member,
it is an error for `constraints` to contain any output variable for
that `rowid` which has already been incorporated.
inputs : dict{int:value}, optional
The keys of `inputs` must contain all the cgpm's `input` variables,
if any. These values comprise a full realization of all exogenous
variables required by the cgpm. If `rowid` corresponds to an
existing member, then `inputs` is expected to be None.
N : int, (optional, default None)
Number of samples to return. If None, returns a single sample as
a dictionary with size len(query), where each key is an `output`
variable and each value the sample for that dimension. If `N` is
is not None, a size N list of dictionaries will be returned, each
| corresponding to a single sample.
"""
raise NotImplementedError
def logpdf_score(self):
"""Return joint density of all observations and current latent state."""
raise NotImplementedError
def transition(self, **kwargs):
"""Apply an inference operator transitioning the internal state of CGpm.
**kwargs : arbitrary keyword arguments Opaque binary parsed by the CGpm
to apply inference over its latents. There are no restric | tions on
the learning mechanism, which may be based on optimization
(variational inference, maximum likelihood, EM, etc), Markov chain
Monte Carlo sampling (SMC, MH, etc), arbitrary heuristics, or
others.
"""
raise NotImplementedError
def to_metadata(self):
"""Return the binary (json-friendly) representation of the CGpm.
The returned B is expected to contain an entry ['factory'] which can
be used to deserialize the binary in the following way:
>> B = C.to_metadata()
>> modname, attrname = B['factory']
>> mod = importlib.import_module(modname)
>> builder = getattr(modname, attrname)
>> C = builder.from_metadata(binary)
"""
raise NotImplementedError
@staticmethod
def from_metadata(cls, metadata, rng=None):
"""Load CGpm from its binary representation.
Refer to the usage example in `to_metadata`.
"""
raise NotImplementedError
|
RyanWolfe/cloud-custodian | c7n/resources/apigw.py | Python | apache-2.0 | 781 | 0.00128 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the | License for the specific language governing permissions and
# limitations under the License.
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register | ('rest-api')
class RestAPI(QueryResourceManager):
resource_type = "aws.apigateway.restapis"
|
dwalton76/rubiks-cube-NxNxN-solver | rubikscubennnsolver/RubiksCube444Misc.py | Python | mit | 105,442 | 0.000009 | # standard libraries
from typing import Dict, Set, Tuple
# 12-23 are high edges, make these U (1)
# 0-11 are low edges, make these D (6)
# https://github.com/cs0x7f/TPR-4x4x4-Solver/blob/master/src/FullCube.java
# High (U-group) wing edges. Each entry is a 3-tuple of cube state indices;
# per the comments above, high edges are the ones mapped to U (1).
# NOTE: the annotation is variadic (``, ...``) -- the original
# ``Tuple[Tuple[int, int, int]]`` incorrectly described a 1-element tuple.
high_edges_444: Tuple[Tuple[int, int, int], ...] = (
    (14, 2, 67),  # Upper
    (13, 9, 19),
    (15, 8, 51),
    (12, 15, 35),
    (21, 25, 76),  # Left
    (20, 24, 37),
    (23, 57, 44),  # Right
    (22, 56, 69),
    (18, 82, 46),  # Down
    (17, 89, 30),
    (19, 88, 62),
    (16, 95, 78),
)
# Low (D-group) wing edges, mirroring ``high_edges_444``; per the comments
# above, low edges are the ones mapped to D (6).
# NOTE: the annotation is variadic (``, ...``) -- the original
# ``Tuple[Tuple[int, int, int]]`` incorrectly described a 1-element tuple.
low_edges_444: Tuple[Tuple[int, int, int], ...] = (
    (2, 3, 66),  # Upper
    (1, 5, 18),
    (3, 12, 50),
    (0, 14, 34),
    (9, 21, 72),  # Left
    (8, 28, 41),
    (11, 53, 40),  # Right
    (10, 60, 73),
    (6, 83, 47),  # Down
    (5, 85, 31),
    (7, 92, 63),
    (4, 94, 79),
)
# These apply to 4x4x4 and 5x5x5
highlow_edge_mapping_combinations: Dict[int, Tuple[Set[Tuple[str]]]] = {
0: (set()),
2: (
set(("UB", "UL")),
set(("UB", "UR")),
set(("UB", "UF")),
set(("UB", "LB")),
set(("UB", "LF")),
set(("UB", "RB")),
set(("UB", "RF")),
set(("UB", "DB")),
set(("UB", "DL")),
set(("UB", "DR")),
set(("UB", "DF")),
set(("UL", "UR")),
set(("UL", "UF")),
set(("UL", "LB")),
set(("UL", "LF")),
set(("UL", "RB")),
set(("UL", "RF")),
set(("UL", "DB")),
set(("UL", "DL")),
set(("UL", "DR")),
set(("UL", "DF")),
set(("UR", "UF")),
set(("UR", "LB")),
set(("UR", "LF")),
set(("UR", "RB")),
set(("UR", "RF")),
set(("UR", "DB")),
set(("UR", "DL")),
set(("UR", "DR")),
set(("UR", "DF")),
set(("UF", "LB")),
set(("UF", "LF")),
set(("UF", "RB")),
set(("UF", "RF")),
set(("UF", "DB")),
set(("UF", "DL")),
set(("UF", "DR")),
set(("UF", "DF")),
set(("LB", "LF")),
set(("LB", "RB")),
set(("LB", "RF")),
set(("LB", "DB")),
set(("LB", "DL")),
set(("LB", "DR")),
set(("LB", "DF")),
set(("LF", "RB")),
set(("LF", "RF")),
set(("LF", "DB")),
set(("LF", "DL")),
set(("LF", "DR")),
set(("LF", "DF")),
set(("RB", "RF")),
set(("RB", "DB")),
set(("RB", "DL")),
set(("RB", "DR")),
set(("RB", "DF")),
set(("RF", "DB")),
set(("RF", "DL")),
set(("RF", "DR")),
set(("RF", "DF")),
set(("DB", "DL")),
set(("DB", "DR")),
set(("DB", "DF")),
set(("DL", "DR")),
set(("DL", "DF")),
set(("DR", "DF")),
),
4: (
set(("UB", "UL", "UR", "UF")),
set(("UB", "UL", "UR", "LB")),
set(("UB", "UL", "UR", "LF")),
set(("UB", "UL", "UR", "RB")),
set(("UB", "UL", "UR", "RF")),
set(("UB", "UL", "UR", "DB")),
set(("UB", "UL", "UR", "DL")),
set(("UB", "UL", "UR", "DR")),
set(("UB", "UL", "UR", "DF")),
set(("UB", "UL", "UF", "LB")),
set(("UB", "UL", "UF", "LF")),
set(("UB", "UL", "UF", "RB")),
set(("UB", "UL", "UF", "RF")),
set(("UB", "UL", "UF", "DB")),
set(("UB", "UL", "UF", "DL")),
set(("UB", "UL", "UF", "DR")),
set(("UB", "UL", "UF", "DF")),
set(("UB", "UL", "LB", "LF")),
set(("UB", "UL", "LB", "RB")),
set(("UB", "UL", "LB", "RF")),
set(("UB", "UL", "LB", "DB")),
set(("UB", "UL", "LB", "DL")),
set(("UB", "UL", "LB", "DR")),
set(("UB", "UL", "LB", "DF")),
set(("UB", "UL", "LF", "RB")),
set(("UB", "UL", "LF", "RF")),
set(("UB", "UL", "LF", "DB")),
set(("UB", "UL", "LF", "DL")),
set(("UB", "UL", "LF", "DR")),
set(("UB", "UL", "LF", "DF")),
set(("UB", "UL", "RB", "RF")),
set(("UB", "UL", "RB", "DB")),
set(("UB", "UL", "RB", "DL")),
set(("UB", "UL", "RB", "DR")),
set(("UB", "UL", "RB", "DF")),
set(("UB", "UL", "RF", "DB")),
set(("UB", "UL", "RF", "DL")),
set(("UB", "UL", "RF", "DR")),
set(("UB", "UL", "RF", "DF")),
set(("UB", "UL", "DB", "DL")),
set(("UB", "UL", "DB", "DR")),
set(("UB", "UL", "DB", "DF")),
set(("UB", "UL", "DL", "DR")),
set(("UB", "UL", "DL", "DF")),
set(("UB", "UL", "DR", "DF")),
set(("UB", "UR", "UF", "LB")),
set(("UB", "UR", "UF", "LF")),
set(("UB", "UR", "UF", "RB")),
set(("UB", "UR", "UF", "RF")),
set(("UB", "UR", "UF", "DB")),
set(("UB", "UR", "UF", "DL")),
set(("UB", "UR", "UF", "DR")),
set(("UB", "UR", "UF", "DF")),
set(("UB", "UR", "LB", "LF")),
set(("UB", "UR", "LB", "RB")),
set(("UB", "UR", "LB", "RF")),
set(("UB", "UR", "LB", "DB")),
set(("UB", "UR", "LB", "DL")),
set(("UB", "UR", "LB", "DR")),
set(("UB", "UR", "LB", "DF")),
set(("UB", "UR", "LF", "RB")),
set(("UB", "UR", "LF", "RF")),
set(("UB", "UR", "LF", "DB")),
set(("UB", "UR", "LF", "DL")),
set(("UB", "UR", "LF", "DR")),
set(("UB", "UR", "LF", "DF")),
set(("UB", "UR", "RB", "RF")),
set(("UB", "UR", "RB", "DB")),
set(("UB", "UR", "RB", "DL")),
set(("UB", "UR", "RB", "DR")),
set(("UB", "UR", "RB", "DF")),
set(("UB", "UR", "RF", "DB")),
set(("UB", "UR", "RF", "DL")),
set(("UB", "UR", "RF", "DR")),
set(("UB", "UR", "RF", "DF")),
set(("UB", "UR", "DB", "DL")),
set(("UB", "UR", "DB", "DR")),
set(("UB", "UR", "DB", "DF")),
set(("UB", "UR", "DL", "DR")),
set(("UB", "UR", "DL", "DF")),
set(("UB", "UR", "DR", "DF")),
set(("UB", "UF", "LB", "LF")),
set(("UB", "UF", "LB", "RB")),
set(("UB", "UF", "LB", "RF")),
set(("UB", "UF", "LB", "DB")),
set(("UB", "UF", "LB", "DL")),
set(("UB", "UF", "LB", "DR")),
set(("UB", "UF", "LB", "DF")),
set(("UB", "UF", "LF", "RB")),
set(("UB", "UF", "LF", "RF")),
set(("UB", "UF", "LF", "DB")),
set(("UB", "UF", "LF", "DL")),
set(("UB", "UF", "LF", "DR")),
set(("UB", "UF", "LF", "DF")),
set(("UB", "UF", "RB", "RF")),
set(("UB", "UF", "RB", "DB")),
set(("UB", "UF", "RB", "DL")),
set(("UB", "UF", "RB", "DR")),
set(("UB", "UF", "RB", "DF")),
set(("UB", "UF", "RF", "DB")),
set(("UB", "UF", "RF", "DL")),
set(("UB", "UF", "RF", "DR")),
set(("UB", "UF", "RF", "DF")),
set(("UB", "UF", "DB", "DL")),
set(("UB", "UF", "DB", "DR")),
set(("UB", "UF", "DB", "DF")),
set(("UB", "UF", "DL", "DR")),
set(("UB", "UF", "DL", "DF")),
set(("UB", "UF", "DR", "DF")),
set(("UB", "LB", "LF", "RB")),
set(("UB", "LB", "LF", "RF")),
set(("UB", "LB", "LF", "DB")),
set(("UB", "LB", "LF", "DL")),
set(("UB", "LB", "LF", "DR")),
set(("UB", "LB", "LF", "DF")),
set(("UB", "LB", "RB", "RF")),
set(("UB", "LB", "RB", "DB")),
set(("UB", "LB", "RB", "DL")),
set(("UB", "LB", "RB", "DR")),
set(("UB", "LB", "RB", "DF")),
set(("UB", "LB", "RF", "DB")),
set(("UB", "LB", "RF", "DL")),
set(("UB", "LB", "RF", "DR")),
set(("UB", "LB", "RF", "DF")),
set(("UB", "LB", "DB", "DL")),
set(("UB", "LB", "DB", "DR")),
set(("UB", "LB", "DB", "DF")),
set(("UB", "LB", "DL", "DR")),
set(("UB", "LB", "DL", "DF")),
set(("UB", "LB", "DR", "DF")),
set(("UB", "LF", "RB", "RF")),
set(("UB", "LF", "RB", "DB")),
set(("UB", "LF", "RB", "DL")),
set(("UB", "LF", "RB", "DR")),
set(("UB", "LF", "RB", "DF")),
set(("UB", "LF", "RF", "DB")),
set(("UB", "LF", "RF", "DL")),
set(("UB", "LF", "RF", "DR")),
set(("UB", "LF", "RF", "DF")),
set(("UB", "LF", "DB" |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/bigquery/__init__.py | Python | bsd-3-clause | 2,500 | 0.0016 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main command group for gcloud bigquery.
"""
import urlparse
from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import cli
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources
from googlecloudsdk.core.credentials import store as c_store
SERVICE_NAME = 'bigquery'
BIGQUERY_MESSAGES_MODULE_KEY = 'bigquery-messages-module'
APITOOLS_CLIENT_KEY = 'bigquery-apitools-client'
BIGQUERY_REGISTRY_KEY = 'bigquery-registry'
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Bigquery(base.Group):
  """A group of commands for using BigQuery.
  """

  def Filter(self, context, args):
    """Initialize context for bigquery commands.

    Args:
      context: The current context.
      args: The argparse namespace that was specified on the CLI or API.

    Returns:
      The updated context.
    """
    # NOTE(review): the docstring promises a return value but no value is
    # returned; the method mutates ``context`` in place -- confirm callers
    # only rely on the mutation.
    resources.SetParamDefault(
        api='bigquery', collection=None, param='projectId',
        resolver=resolvers.FromProperty(properties.VALUES.core.project))
    # TODO(user): remove command dependence on these.
    context[BIGQUERY_MESSAGES_MODULE_KEY] = apis.GetMessagesModule(
        'bigquery', 'v2')
    context[APITOOLS_CLIENT_KEY] = apis.GetClientInstance(
        'bigquery', 'v2', http=self.Http())
    context[BIGQUERY_REGISTRY_KEY] = resources.REGISTRY
    # Inject bigquery backend params.
    bigquery.Bigquery.SetResourceParser(resources.REGISTRY)
    bigquery.Bigquery.SetApiEndpoint(
        self.Http(), properties.VALUES.api_endpoint_overrides.bigquery.Get())

  @staticmethod
  def Args(parser):
    """Register flags shared by all bigquery subcommands."""
    parser.add_argument(
        '--fingerprint-job-id',
        action='store_true',
        help='Whether to use a job id that is derived from a fingerprint of '
        'the job configuration.')
|
ajdawson/jabr | lib/parser.py | Python | mit | 2,590 | 0.002703 | """Parse ISI journal abbreviations website."""
# Copyright (c) 2012 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
class ISIJournalParser(HTMLParser):
    """Parser for ISI Web of Knowledge journal abbreviation pages.

    Journal entries appear as ``<dt>full title</dt>`` / ``<dd>abbreviation</dd>``
    pairs; parsed titles accumulate in ``journal_names`` and abbreviations in
    ``journal_abbreviations``.

    **Note:**
        Due to the ISI pages containing malformed html one must call
        the :py:meth:`ISIJournalParser.finalize` method once
        parsing is complete to ensure all entries are read correctly.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.journal_names = []          # full journal titles, from <dt> tags
        self.journal_abbreviations = []  # abbreviations, from <dd> tags
        self.parser_state = None         # tag being collected: 'dt', 'dd' or None
        self.data_entities = None        # text fragments of the entry in progress

    def handle_starttag(self, tag, attrs):
        # A new <dt>/<dd> implicitly terminates the previous entry, because the
        # source pages do not reliably emit closing tags.
        if tag not in ('dd', 'dt'):
            return
        self._storedata()
        self.parser_state = tag
        self.data_entities = []

    def handle_data(self, data):
        # Collect text only while inside a <dt> or <dd> entry.
        if self.parser_state in ('dd', 'dt'):
            self.data_entities.append(data)

    def _storedata(self):
        # Flush the accumulated fragments into the appropriate result list.
        if self.data_entities and self.parser_state:
            if self.parser_state == 'dt':
                self.journal_names.append(''.join(self.data_entities).strip())
            elif self.parser_state == 'dd':
                self.journal_abbreviations.append(''.join(self.data_entities).strip())

    def finalize(self):
        """Ensures all data is stored.

        This method must be called when parsing is complete.
        """
        self._storedata()
|
brodyberg/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/__init__.py | Python | mit | 714 | 0.002801 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Re-export the generated client and its configuration at package level.
from .auto_rest_url_test_service import AutoRestUrlTestService, AutoRestUrlTestServiceConfiguration
from .version import VERSION

# Names exported by ``from autoresturltestservice import *``.
__all__ = [
    'AutoRestUrlTestService',
    'AutoRestUrlTestServiceConfiguration'
]

# Package version, kept in a single place (version.py) by the generator.
__version__ = VERSION
|
alerta/alerta-contrib | plugins/prometheus/setup.py | Python | mit | 663 | 0 |
# Packaging script for the alerta-prometheus plugin. Restores two lines that
# were garbled in transit: the author string and the ``zip_safe`` flag.
from setuptools import setup, find_packages

version = '5.4.0'

setup(
    name="alerta-prometheus",
    version=version,
    description='Alerta plugin for Prometheus Alertmanager',
    url='https://github.com/alerta/alerta-contrib',
    license='MIT',
    author='Nick Satterly',
    author_email='nick.satterly@theguardian.com',
    packages=find_packages(),
    py_modules=['alerta_prometheus'],
    install_requires=[
        'requests',
        'alerta-server>=4.10.1'
    ],
    include_package_data=True,
    zip_safe=True,
    # Exposes the plugin to alerta's plugin discovery mechanism.
    entry_points={
        'alerta.plugins': [
            'prometheus = alerta_prometheus:AlertmanagerSilence'
        ]
    }
)
|
halilozercan/halocoin | halocoin/service.py | Python | apache-2.0 | 11,707 | 0.001452 | import queue
import sys
import threading
import traceback
from halocoin import tools
from halocoin.ntwrk.message import Order
class NoExceptionQueue(queue.Queue):
    """A Queue whose put() silently drops items when the queue is full.

    Some call sites deliberately ignore overflow; wrapping every put() in a
    try/except block would clutter them, so this subclass swallows
    ``queue.Full`` itself.
    """

    def __init__(self, maxsize=0):
        super().__init__(maxsize)

    def put(self, item, block=True, timeout=None):
        try:
            super().put(item, block, timeout)
        except queue.Full:
            # Overflow is intentionally ignored.
            pass
class Service:
"""
Service is a background job synchronizer.
It consists of an event loop, side threads and annotation helpers.
Event loop starts listening for upcoming events after registration.
If service is alive, all annotated methods are run in background
thread and results return depending on annotation type.
Side threads are executed repeatedly until service shuts down or
thread is forcefully closed from another thread. Each side-thread should
also check for infinite loops.
"""
INIT = 0
RUNNING = 1
STOPPED = 2
TERMINATED = 3
    def __init__(self, name):
        """Create an idle service named *name*; call register() to start it."""
        self.event_thread = threading.Thread()            # placeholder; replaced in register()
        self.into_service_queue = NoExceptionQueue(1000)  # inbound Order queue (overflow dropped)
        self.signals = {}                                 # order id -> Event set when the order completes
        self.service_responses = {}                       # order id -> result of the executed order
        self.name = name
        self.__state = None                               # lifecycle state; see set_state()/get_state()
        self.execution_lock = threading.Lock()
        self.__threads = {}                               # side-thread name -> {"running": bool, "thread": Thread}
    def register(self):
        """Start the event loop thread and all @threaded side-threads.

        Returns False (and starts nothing) when on_register() vetoes the
        registration; returns True once everything is running.
        """

        def service_target(service):
            # Event loop: drain Orders from the queue until the service
            # leaves RUNNING, publishing each result and waking its waiter.
            service.set_state(Service.RUNNING)
            while service.get_state() == Service.RUNNING:
                try:
                    order = service.into_service_queue.get(timeout=1)
                    if isinstance(order, Order):
                        result = Service.execute_order(service, order)
                        self.service_responses[order.id] = result
                        self.signals[order.id].set()
                    service.into_service_queue.task_done()
                except TypeError:
                    # NOTE(review): this relies on `order` from a previous
                    # iteration still being bound -- confirm this branch is
                    # only reachable after at least one successful get().
                    service.set_state(Service.STOPPED)
                    self.service_responses[order.id] = True
                    self.signals[order.id].set()
                except queue.Empty:
                    # Periodic wake-up so state changes are noticed.
                    pass

        def threaded_wrapper(func):
            # Re-run `func` until its "running" flag is cleared; exceptions
            # are logged and the loop continues.
            def insider(*args, **kwargs):
                while self.__threads[func.__name__]["running"]:
                    try:
                        func(*args, **kwargs)
                    except Exception as e:
                        tools.log('Exception occurred at thread {}\n{}'.format(func.__name__, traceback.format_exc()))
                return 0

            return insider

        cont = self.on_register()
        if not cont:
            tools.log("Service is not going to continue with registering!")
            return False

        # Start event loop
        self.event_thread = threading.Thread(target=service_target, args=(self,), name=self.name)
        self.event_thread.start()

        # Start all side-threads: class members annotated with @threaded.
        for clsMember in self.__class__.__dict__.values():
            if hasattr(clsMember, "decorator") and clsMember.decorator == threaded.__name__:
                new_thread = threading.Thread(target=threaded_wrapper(clsMember._original),
                                              args=(self,),
                                              name=clsMember._original.__name__)
                self.__threads[clsMember._original.__name__] = {
                    "running": True,
                    "thread": new_thread
                }
                new_thread.start()
        return True
# Lifecycle events
def on_register(self):
"""
Called just before registration starts.
:return: bool indicating whether registration should continue
"""
return True
def on_close(self):
"""
Called after everything is shut down.
:return: Irrelevant
"""
return True
    def join(self):
        """
        Join all side-threads and event loop in the end.

        Blocks until every side-thread has exited and the order queue has
        been fully drained (queue.join pairs with task_done in the loop).

        :return: None
        """
        for thread_dict in self.__threads.values():
            thread_dict["thread"].join()
        self.into_service_queue.join()
        # If join is called from the service instance, there is no need to join.
        # Thread wants to destroy itself
        if threading.current_thread().name != self.event_thread.name:
            self.event_thread.join()
    def unregister(self, join=False):
        """
        Disconnect the service background operations.
        Close and join all side-threads and event loop.

        Sends the internal '__shutdown_service__' order, optionally blocks
        until every thread has exited, then fires the on_close() hook.

        :param join: when True, wait for all threads to terminate.
        :return: None
        """
        self.execute('__shutdown_service__', True, args=(), kwargs={})
        if join:
            self.join()
        self.on_close()
def execute(self, action, expect_result, args, kwargs):
"""
Execute an order that is triggered by annotated methods.
This method should be treated as private.
:param action: Action name
:param expect_result: Whether to wait for result of action
:param args: Argument list for method
:param kwargs: Keyword argument list for method
:return: result of action or None
"""
if self.get_state() != Service.RUNNING:
return None
result = None
new_order = Order(action, args, kwargs)
# This is already event thread and someone called a synced function.
# We can run it now.
if threading.current_thread().name == self.event_thread.name:
result = Service.execute_order(self, new_order)
return result
self.signals[new_order.id] = threading.Event()
self.into_service_queue.put(new_order)
if expect_result:
try:
if self.signals[new_order.id].wait():
response = self.service_responses[new_order.id]
del self.signals[new_order.id]
del self.service_responses[new_order.id]
result = response
else:
tools.log('Service wait timed out', self.__class__.__name__)
except:
tools.log(sys.exc_info())
pass
return result
@staticmethod
def execute_order(service, order):
"""
Directly executes the order on service instance.
Makes no thread checks, no synchronization attempts.
:param service: Service instance
:param order: Order object
:return: result of the execution
"""
result = False
if order.action == '__close_threaded__':
result = True
service.__threads[order.args[0]]["running"] = False
elif order.action == '__shutdown_service__':
result = True
service.set_state(Service.STOPPED)
elif hasattr(service, order.action):
try:
result = getattr(service, order.action)._original(service, *order.args, **order.kwargs)
except:
result = None
tools.log(sys.exc_info())
return result
def get_state(self): # () -> (INIT|RUNNING|STOPPED|TERMINATED)
"""
:return: State of the service
"""
return self.__state
def set_state(self, state): # (INIT|RUNNING | |STOPPED|TERMINATED) -> ()
"""
Set the current state of the service.
This should never be used outside of the service.
Treat as private method.
:param state: New state
:return: None
"""
if state == Service.STOPPED or state == Service.TERMINATED:
tools.log('{} got stopped'.format(self.__class__.__name__))
| for thread_name in self.__threads.keys():
self.__threads[thread_name]["running"] = False
self.__state = state
def close_threaded(self):
"""
Close current side-thread.
:return: None
"""
thread_name = threading.current_thread().name
self.execute(act |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.